/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 */

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/prctl.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/spr_def.h>
#include <arch/opcode.h>

#ifndef __tilegx__ /* Hardware support for single step unavailable. */

#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
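
/*
 * TILE_X1_MASK selects the X1 instruction slot in the upper bits of a
 * 64-bit tilepro bundle.  The rewriters below clear that slot and then
 * OR in a replacement instruction built with the create_*_X1() helpers.
 */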

enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};

static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
					       s32 offset)
{
	tilepro_bundle_bits result;

	/* mask out the old offset */
	tilepro_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
					  int src)
{
	tilepro_bundle_bits result;
	tilepro_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	result |= op;
	return result;
}

static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tilepro_bundle_bits addi_X1(
	tilepro_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |= (create_SrcA_X1(src) |
	      create_Dest_X1(dest) |
	      create_Imm8_X1(imm) |
	      create_S_X1(0) |
	      create_Opcode_X1(IMM_0_OPCODE_X1) |
	      create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}

static tilepro_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tilepro_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;
	int align_ctl;

	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}
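
	/*
	 * align_ctl starts from the global unaligned_fixup setting and may
	 * be overridden per task via prctl(PR_SET_UNALIGN).  A value of 1
	 * means fix up the access silently, 0 means deliver SIGBUS below;
	 * negative values never reach this function, since the caller only
	 * rewrites when align_ctl >= 0.
	 */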

	/* Get address and value registers */
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

	/*
	 * Return SIGBUS with the unaligned address, if requested.
	 * Note that we return SIGBUS even for completely invalid addresses
	 * as long as they are in fact unaligned; this matches what the
	 * tilepro hardware would be doing, if it could provide us with the
	 * actual bad address in an SPR, which it doesn't.
	 */
	if (align_ctl == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		unsigned short val_16;
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		switch (size) {
		case 2:
			val_16 = val;
			err = copy_to_user(addr, &val_16, sizeof(val_16));
			break;
		case 4:
			err = copy_to_user(addr, &val, sizeof(val));
			break;
		default:
			BUG();
		}
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("bad address for unaligned fixup", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
			P("\n");
			P("Unaligned fixups in the kernel will slow your application considerably.\n");
			P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
			P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
			P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
			P("access will become a SIGBUS you can debug. No further warnings will be\n");
			P("shown so as to avoid additional slowdown, but you can track the number\n");
			P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
			P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
			P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
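		/*
		 * A "lw" into TREG_ZERO discards its result, so re-running
		 * the rewritten Y2 slot merely touches the cache line; the
		 * unaligned value itself was already transferred above with
		 * copy_from_user()/copy_to_user().
		 */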
		/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
		/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	ti->step_state = NULL;
}

/*
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a conditional branch, then the target is
 * reset to one past the next instruction.  If the instruction
 * sets the lr, then that is noted.  If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * single_step_state.  We use data segment values because the
 * stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	extern tilepro_bundle_bits __single_step_ill_insn;
	extern tilepro_bundle_bits __single_step_j_insn;
	extern tilepro_bundle_bits __single_step_addli_insn;
	extern tilepro_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tilepro_bundle_bits __user *buffer, *pc;
	tilepro_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */
	int align_ctl;

	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}

	asm(
"	.pushsection .rodata.single_step\n"
"	.align 8\n"
"	.globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
"	ill\n"
"	.globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
"	{ nop; addli r0, zero, 0 }\n"
"	.globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
"	{ nop; auli r0, r0, 0 }\n"
"	.globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
"	j .\n"
"	.popsection\n"
	);
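
	/*
	 * The asm above only plants template bundles in .rodata; they are
	 * never executed in place.  The code below fetches them as data,
	 * patches registers and immediates in with the create_*_X1()
	 * helpers, and writes the results to the userspace step buffer.
	 */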

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate the per-thread single-step state */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		buffer = (void __user *) vm_mmap(NULL, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tilepro_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch so, if taken, it skips the first ill bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
				(1ULL << get_SrcA_X0(bundle)) |
				(1ULL << get_SrcB_X0(bundle)) |
				(1ULL << target_reg));
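			/*
			 * At most four bits are cleared above, and registers
			 * numbered 32 or higher (e.g. TREG_LR, register 55)
			 * fall away in the u32 cast, so ctz always finds a
			 * register in r0-r31 that the bundle doesn't touch.
			 */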
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we generated a signal.
	 */
	if (mem_op != MEMOP_NONE && align_ctl >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
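			/*
			 * Reconstruct the 32-bit update_value with an
			 * addli/auli pair.  addli sign-extends its 16-bit
			 * immediate, so the high half adds 0x8000 first
			 * (the usual lo16/ha16 trick) to cancel that sign
			 * extension.
			 */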
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
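		/* (J encodes its offset in bundles, hence the shift below.) */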
		delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
			 (unsigned long)buffer) >>
			TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
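	/*
	 * (regs->pc is the copied swint1 bundle at the start of the buffer;
	 * skipping one bundle lands on the first "ill", so we re-enter via
	 * SIGILL as if the swint1 had just executed.)
	 */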
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#else

static DEFINE_PER_CPU(unsigned long, ss_saved_pc);


/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */

void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
		send_sigtrap(current, regs);
	}
}


/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return back.
 */

void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

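	/*
	 * Save the PC so gx_singlestep_handle() can tell whether an
	 * instruction actually retired, then re-arm CANCELED/INHIBIT
	 * before enabling hardware single-step for user PL only.
	 */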
	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
	/* Nothing */
}

#endif /* !__tilegx__ */