PTRACE_POKEDATA consolidation
arch/x86_64/kernel/ptrace.c
/* ptrace.c */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * x86-64 port 2000-2002 Andi Kleen
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>

/*
 * This does not yet catch signals sent when the child dies;
 * that would need to be handled in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
 * Also masks reserved bits (63-22, 15, 5, 3, 1).
 */
#define FLAG_MASK 0x54dd5UL
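/*
 * For reference, the user-writable bits left by FLAG_MASK are
 * CF(0), PF(2), AF(4), ZF(6), SF(7), TF(8), DF(10), OF(11),
 * NT(14), RF(16) and AC(18).
 */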

/* Sets the trap flag. */
#define TRAP_FLAG 0x100UL

/*
 * eflags and offset of eflags on child stack..
 */
#define EFLAGS offsetof(struct pt_regs, eflags)
#define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
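/*
 * thread.rsp0 points at the top of the kernel stack, just past the saved
 * user-mode pt_regs, so "field offset - sizeof(struct pt_regs)" yields the
 * (negative) displacement from rsp0 used by get/put_stack_long() below.
 */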

/*
 * This routine will get a word off of the process's privileged stack.
 * The offset is how far from the base addr as stored in the TSS.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline unsigned long get_stack_long(struct task_struct *task, int offset)
{
        unsigned char *stack;

        stack = (unsigned char *)task->thread.rsp0;
        stack += offset;
        return (*((unsigned long *)stack));
}

/*
 * This routine will put a word on the process's privileged stack.
 * The offset is how far from the base addr as stored in the TSS.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long put_stack_long(struct task_struct *task, int offset,
                                  unsigned long data)
{
        unsigned char *stack;

        stack = (unsigned char *)task->thread.rsp0;
        stack += offset;
        *(unsigned long *)stack = data;
        return 0;
}

#define LDT_SEGMENT 4

unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
        unsigned long addr, seg;

        addr = regs->rip;
        seg = regs->cs & 0xffff;

        /*
         * We'll assume that the code segments in the GDT
         * are all zero-based. That is largely true: the
         * TLS segments are used for data, and the PNPBIOS
         * and APM bios ones we just ignore here.
         */
        if (seg & LDT_SEGMENT) {
                u32 *desc;
                unsigned long base;

                down(&child->mm->context.sem);
                desc = child->mm->context.ldt + (seg & ~7);
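                /*
                 * An LDT descriptor is 8 bytes, so (seg & ~7) is its byte
                 * offset; the segment base is scattered across the entry:
                 * base[15:0] in bits 16-31 of the low word, base[23:16] in
                 * bits 0-7 of the high word, base[31:24] in bits 24-31.
                 */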
                base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);

                /* 16-bit code segment? */
                if (!((desc[1] >> 22) & 1))
                        addr &= 0xffff;
                addr += base;
                up(&child->mm->context.sem);
        }
        return addr;
}

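/*
 * Look at the instruction the traced task is about to execute and decide
 * whether it will itself load EFLAGS (popf or iret), skipping over any
 * instruction prefixes.  If so, a TF bit set by the tracer is not "ours"
 * to clear later; see set_singlestep() below.
 */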
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
        int i, copied;
        unsigned char opcode[15];
        unsigned long addr = convert_rip_to_linear(child, regs);

        copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
        for (i = 0; i < copied; i++) {
                switch (opcode[i]) {
                /* popf and iret */
                case 0x9d: case 0xcf:
                        return 1;

                        /* CHECKME: 64 65 */

                /* operand and address size prefixes */
                case 0x66: case 0x67:
                        continue;
                /* irrelevant prefixes (segment overrides and repeats) */
                case 0x26: case 0x2e:
                case 0x36: case 0x3e:
                case 0x64: case 0x65:
                case 0xf2: case 0xf3:
                        continue;

                case 0x40 ... 0x4f:
                        if (regs->cs != __USER_CS)
                                /* 32-bit mode: register increment */
                                return 0;
                        /* 64-bit mode: REX prefix */
                        continue;

                        /* CHECKME: f2, f3 */

                /*
                 * pushf: NOTE! We should probably not let
                 * the user see the TF bit being set. But
                 * it's more pain than it's worth to avoid
                 * it, and a debugger could emulate this
                 * all in user space if it _really_ cares.
                 */
                case 0x9c:
                default:
                        return 0;
                }
        }
        return 0;
}

static void set_singlestep(struct task_struct *child)
{
        struct pt_regs *regs = task_pt_regs(child);

        /*
         * Always set TIF_SINGLESTEP - this guarantees that
         * we single-step system calls etc.. This will also
         * cause us to set TF when returning to user mode.
         */
        set_tsk_thread_flag(child, TIF_SINGLESTEP);

        /*
         * If TF was already set, don't do anything else
         */
        if (regs->eflags & TRAP_FLAG)
                return;

        /* Set TF on the kernel stack.. */
        regs->eflags |= TRAP_FLAG;

        /*
         * ..but if TF is changed by the instruction we will trace,
         * don't mark it as being "us" that set it, so that we
         * won't clear it by hand later.
         */
        if (is_setting_trap_flag(child, regs))
                return;

        child->ptrace |= PT_DTRACE;
}

static void clear_singlestep(struct task_struct *child)
{
        /* Always clear TIF_SINGLESTEP... */
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);

        /* But touch TF only if it was set by us.. */
        if (child->ptrace & PT_DTRACE) {
                struct pt_regs *regs = task_pt_regs(child);
                regs->eflags &= ~TRAP_FLAG;
                child->ptrace &= ~PT_DTRACE;
        }
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
        clear_singlestep(child);
}

static int putreg(struct task_struct *child,
                  unsigned long regno, unsigned long value)
{
        unsigned long tmp;

        /* Some code in the 64bit emulation may not be 64bit clean.
           Don't take any chances. */
        if (test_tsk_thread_flag(child, TIF_IA32))
                value &= 0xffffffff;
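        /*
         * Selector writes below must either be 0 (fs/gs/ds/es) or carry
         * RPL 3; cs and ss must carry RPL 3.  fs_base/gs_base must stay
         * below the task's address-space limit, and eflags is filtered
         * through FLAG_MASK.
         */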
        switch (regno) {
        case offsetof(struct user_regs_struct,fs):
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.fsindex = value & 0xffff;
                return 0;
        case offsetof(struct user_regs_struct,gs):
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.gsindex = value & 0xffff;
                return 0;
        case offsetof(struct user_regs_struct,ds):
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.ds = value & 0xffff;
                return 0;
        case offsetof(struct user_regs_struct,es):
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.es = value & 0xffff;
                return 0;
        case offsetof(struct user_regs_struct,ss):
                if ((value & 3) != 3)
                        return -EIO;
                value &= 0xffff;
                return 0;
        case offsetof(struct user_regs_struct,fs_base):
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
                child->thread.fs = value;
                return 0;
        case offsetof(struct user_regs_struct,gs_base):
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
                child->thread.gs = value;
                return 0;
        case offsetof(struct user_regs_struct, eflags):
                value &= FLAG_MASK;
                tmp = get_stack_long(child, EFL_OFFSET);
                tmp &= ~FLAG_MASK;
                value |= tmp;
                break;
        case offsetof(struct user_regs_struct,cs):
                if ((value & 3) != 3)
                        return -EIO;
                value &= 0xffff;
                break;
        }
        put_stack_long(child, regno - sizeof(struct pt_regs), value);
        return 0;
}

static unsigned long getreg(struct task_struct *child, unsigned long regno)
{
        unsigned long val;
        switch (regno) {
        case offsetof(struct user_regs_struct, fs):
                return child->thread.fsindex;
        case offsetof(struct user_regs_struct, gs):
                return child->thread.gsindex;
        case offsetof(struct user_regs_struct, ds):
                return child->thread.ds;
        case offsetof(struct user_regs_struct, es):
                return child->thread.es;
        case offsetof(struct user_regs_struct, fs_base):
                return child->thread.fs;
        case offsetof(struct user_regs_struct, gs_base):
                return child->thread.gs;
        default:
                regno = regno - sizeof(struct pt_regs);
                val = get_stack_long(child, regno);
                if (test_tsk_thread_flag(child, TIF_IA32))
                        val &= 0xffffffff;
                return val;
        }

}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
        long i, ret;
        unsigned ui;

        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA:
                ret = generic_ptrace_peekdata(child, addr, data);
                break;

        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                unsigned long tmp;

                ret = -EIO;
                if ((addr & 7) ||
                    addr > sizeof(struct user) - 7)
                        break;

                switch (addr) {
                case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
                        tmp = getreg(child, addr);
                        break;
                case offsetof(struct user, u_debugreg[0]):
                        tmp = child->thread.debugreg0;
                        break;
                case offsetof(struct user, u_debugreg[1]):
                        tmp = child->thread.debugreg1;
                        break;
                case offsetof(struct user, u_debugreg[2]):
                        tmp = child->thread.debugreg2;
                        break;
                case offsetof(struct user, u_debugreg[3]):
                        tmp = child->thread.debugreg3;
                        break;
                case offsetof(struct user, u_debugreg[6]):
                        tmp = child->thread.debugreg6;
                        break;
                case offsetof(struct user, u_debugreg[7]):
                        tmp = child->thread.debugreg7;
                        break;
                default:
                        tmp = 0;
                        break;
                }
                ret = put_user(tmp, (unsigned long __user *) data);
                break;
        }

        /* when I and D space are separate, this will have to be fixed. */
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                break;

        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
        {
                int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
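                /*
                 * dsize is the widest possible breakpoint span minus one
                 * (4 bytes for 32-bit tasks, 8 for 64-bit), so the checks
                 * below require addr through addr + dsize to stay inside
                 * the task's address space.
                 */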
                ret = -EIO;
                if ((addr & 7) ||
                    addr > sizeof(struct user) - 7)
                        break;

                switch (addr) {
                case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
                        ret = putreg(child, addr, data);
                        break;
                /* Disallow setting a breakpoint into the vsyscall area. */
                case offsetof(struct user, u_debugreg[0]):
                        if (data >= TASK_SIZE_OF(child) - dsize) break;
                        child->thread.debugreg0 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[1]):
                        if (data >= TASK_SIZE_OF(child) - dsize) break;
                        child->thread.debugreg1 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[2]):
                        if (data >= TASK_SIZE_OF(child) - dsize) break;
                        child->thread.debugreg2 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[3]):
                        if (data >= TASK_SIZE_OF(child) - dsize) break;
                        child->thread.debugreg3 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[6]):
                        if (data >> 32)
                                break;
                        child->thread.debugreg6 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[7]):
                        /* See arch/i386/kernel/ptrace.c for an explanation of
                         * this awkward check. */
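                        /*
                         * Bits 16+4*i .. 19+4*i of DR7 hold the R/W and LEN
                         * fields for breakpoint i.  A set bit in 0x5554 at
                         * that 4-bit value flags a disallowed combination:
                         * I/O breakpoints (R/W == 2) and execute breakpoints
                         * with a non-zero length.
                         */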
                        data &= ~DR_CONTROL_RESERVED;
                        for (i = 0; i < 4; i++)
                                if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
                                        break;
                        if (i == 4) {
                                child->thread.debugreg7 = data;
                                if (data)
                                        set_tsk_thread_flag(child, TIF_DEBUG);
                                else
                                        clear_tsk_thread_flag(child, TIF_DEBUG);
                                ret = 0;
                        }
                        break;
                }
                break;
        }
        case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
        case PTRACE_CONT: /* restart after signal. */

                ret = -EIO;
                if (!valid_signal(data))
                        break;
                if (request == PTRACE_SYSCALL)
                        set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                else
                        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                clear_tsk_thread_flag(child, TIF_SINGLESTEP);
                child->exit_code = data;
                /* make sure the single step bit is not set. */
                clear_singlestep(child);
                wake_up_process(child);
                ret = 0;
                break;

#ifdef CONFIG_IA32_EMULATION
        /* This only makes sense with 32-bit programs. Allow a
           64-bit debugger to fully examine them too. Better
           not to use it against 64-bit processes; use
           PTRACE_ARCH_PRCTL instead. */
        case PTRACE_SET_THREAD_AREA: {
                struct user_desc __user *p;
                int old;
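                /*
                 * The TLS entry number arrives in 'addr': temporarily patch
                 * it into the user's struct user_desc, make the call, then
                 * restore the caller's original entry_number.  Note that the
                 * PTRACE_GET_THREAD_AREA label below sits inside this same
                 * block so that it can share these locals.
                 */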
                p = (struct user_desc __user *)data;
                get_user(old, &p->entry_number);
                put_user(addr, &p->entry_number);
                ret = do_set_thread_area(&child->thread, p);
                put_user(old, &p->entry_number);
                break;
        case PTRACE_GET_THREAD_AREA:
                p = (struct user_desc __user *)data;
                get_user(old, &p->entry_number);
                put_user(addr, &p->entry_number);
                ret = do_get_thread_area(&child->thread, p);
                put_user(old, &p->entry_number);
                break;
        }
#endif
        /* normal 64bit interface to access TLS data.
           Works just like arch_prctl, except that the arguments
           are reversed. */
        case PTRACE_ARCH_PRCTL:
                ret = do_arch_prctl(child, data, addr);
                break;

        /*
         * make the child exit. Best I can do is send it a sigkill.
         * perhaps it should be put in the status that it wants to
         * exit.
         */
        case PTRACE_KILL:
                ret = 0;
                if (child->exit_state == EXIT_ZOMBIE) /* already dead */
                        break;
                clear_tsk_thread_flag(child, TIF_SINGLESTEP);
                child->exit_code = SIGKILL;
                /* make sure the single step bit is not set. */
                clear_singlestep(child);
                wake_up_process(child);
                break;

        case PTRACE_SINGLESTEP: /* set the trap flag. */
                ret = -EIO;
                if (!valid_signal(data))
                        break;
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                set_singlestep(child);
                child->exit_code = data;
                /* give it a chance to run. */
                wake_up_process(child);
                ret = 0;
                break;

        case PTRACE_DETACH:
                /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

        case PTRACE_GETREGS: { /* Get all gp regs from the child. */
                if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
                               sizeof(struct user_regs_struct))) {
                        ret = -EIO;
                        break;
                }
                ret = 0;
                for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
                        ret |= __put_user(getreg(child, ui), (unsigned long __user *) data);
                        data += sizeof(long);
                }
                break;
        }

        case PTRACE_SETREGS: { /* Set all gp regs in the child. */
                unsigned long tmp;
                if (!access_ok(VERIFY_READ, (unsigned __user *)data,
                               sizeof(struct user_regs_struct))) {
                        ret = -EIO;
                        break;
                }
                ret = 0;
                for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
                        ret = __get_user(tmp, (unsigned long __user *) data);
                        if (ret)
                                break;
                        ret = putreg(child, ui, tmp);
                        if (ret)
                                break;
                        data += sizeof(long);
                }
                break;
        }

        case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
                if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
                               sizeof(struct user_i387_struct))) {
                        ret = -EIO;
                        break;
                }
                ret = get_fpregs((struct user_i387_struct __user *)data, child);
                break;
        }

        case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
                if (!access_ok(VERIFY_READ, (unsigned __user *)data,
                               sizeof(struct user_i387_struct))) {
                        ret = -EIO;
                        break;
                }
                set_stopped_child_used_math(child);
                ret = set_fpregs(child, (struct user_i387_struct __user *)data);
                break;
        }

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
        return ret;
}
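
/*
 * For reference, a debugger reaches the PTRACE_PEEKUSR path above via the
 * ptrace(2) syscall; a rough userspace sketch (error handling omitted):
 *
 *      errno = 0;
 *      long rip = ptrace(PTRACE_PEEKUSER, pid,
 *                        offsetof(struct user_regs_struct, rip), 0);
 *
 * 'addr' is the byte offset into struct user, which lands in getreg() for
 * offsets inside user_regs_struct.
 */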

static void syscall_trace(struct pt_regs *regs)
{

#if 0
        printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
               current->comm,
               regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
               current_thread_info()->flags, current->ptrace);
#endif

        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
                                 ? 0x80 : 0));
        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use. strace only continues with a signal if the
         * stopping signal is not SIGTRAP. -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
}

asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
        /* do the secure computing check first */
        secure_computing(regs->orig_rax);

        if (test_thread_flag(TIF_SYSCALL_TRACE)
            && (current->ptrace & PT_PTRACED))
                syscall_trace(regs);

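        /*
         * The first four syscall arguments live in different registers per
         * ABI: ebx/ecx/edx/esi for 32-bit tasks, rdi/rsi/rdx/r10 for 64-bit
         * ones, hence the two audit_syscall_entry() variants below.
         */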
        if (unlikely(current->audit_context)) {
                if (test_thread_flag(TIF_IA32)) {
                        audit_syscall_entry(AUDIT_ARCH_I386,
                                            regs->orig_rax,
                                            regs->rbx, regs->rcx,
                                            regs->rdx, regs->rsi);
                } else {
                        audit_syscall_entry(AUDIT_ARCH_X86_64,
                                            regs->orig_rax,
                                            regs->rdi, regs->rsi,
                                            regs->rdx, regs->r10);
                }
        }
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);

        if ((test_thread_flag(TIF_SYSCALL_TRACE)
             || test_thread_flag(TIF_SINGLESTEP))
            && (current->ptrace & PT_PTRACED))
                syscall_trace(regs);
}