1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/dir.h>
27 #include <sys/ptrace.h>
28 #include <sys/user.h>
29 #include <signal.h>
30 #include <sys/ioctl.h>
31 #include <fcntl.h>
32 #include <string.h>
33 #include <stdlib.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <sys/syscall.h>
37 #include <sched.h>
38
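/* If the system headers did not provide these ptrace requests, fall back
   to the numeric values the Linux kernel assigns to them. */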
39 #ifndef PTRACE_GETSIGINFO
40 # define PTRACE_GETSIGINFO 0x4202
41 # define PTRACE_SETSIGINFO 0x4203
42 #endif
43
44 #ifndef O_LARGEFILE
45 #define O_LARGEFILE 0
46 #endif
47
48 /* If the system headers did not provide the constants, hard-code the normal
49 values. */
50 #ifndef PTRACE_EVENT_FORK
51
52 #define PTRACE_SETOPTIONS 0x4200
53 #define PTRACE_GETEVENTMSG 0x4201
54
55 /* options set using PTRACE_SETOPTIONS */
56 #define PTRACE_O_TRACESYSGOOD 0x00000001
57 #define PTRACE_O_TRACEFORK 0x00000002
58 #define PTRACE_O_TRACEVFORK 0x00000004
59 #define PTRACE_O_TRACECLONE 0x00000008
60 #define PTRACE_O_TRACEEXEC 0x00000010
61 #define PTRACE_O_TRACEVFORKDONE 0x00000020
62 #define PTRACE_O_TRACEEXIT 0x00000040
63
64 /* Wait extended result codes for the above trace options. */
65 #define PTRACE_EVENT_FORK 1
66 #define PTRACE_EVENT_VFORK 2
67 #define PTRACE_EVENT_CLONE 3
68 #define PTRACE_EVENT_EXEC 4
69 #define PTRACE_EVENT_VFORK_DONE 5
70 #define PTRACE_EVENT_EXIT 6
71
72 #endif /* PTRACE_EVENT_FORK */
73
74 /* We can't always assume that this flag is available, but all systems
75 with the ptrace event handlers also have __WALL, so it's safe to use
76 in some contexts. */
77 #ifndef __WALL
78 #define __WALL 0x40000000 /* Wait for any child. */
79 #endif
80
81 #ifdef __UCLIBC__
82 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
83 #define HAS_NOMMU
84 #endif
85 #endif
86
87 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
88 representation of the thread ID.
89
90 ``all_processes'' is keyed by the process ID - which on Linux is (presently)
91 the same as the LWP ID. */
92
93 struct inferior_list all_processes;
94
95 /* A list of all unknown processes which receive stop signals. Some other
96 process will presumably claim each of these as forked children
97 momentarily. */
98
99 struct inferior_list stopped_pids;
100
101 /* FIXME this is a bit of a hack, and could be removed. */
102 int stopping_threads;
103
104 /* FIXME make into a target method? */
105 int using_threads = 1;
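/* Nonzero once thread_db_init has successfully connected us to libthread_db
   for this inferior (see linux_look_up_symbols). */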
106 static int thread_db_active;
107
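/* Nonzero if PTRACE_SETOPTIONS still needs to be applied to a newly created
   inferior once it reports its first stop (see linux_wait). */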
108 static int must_set_ptrace_flags;
109
110 /* This flag is true iff we've just created or attached to a new inferior
111 but it has not stopped yet. As soon as it does, we need to call the
112 low target's arch_setup callback. */
113 static int new_inferior;
114
115 static void linux_resume_one_process (struct inferior_list_entry *entry,
116 int step, int signal, siginfo_t *info);
117 static void linux_resume (struct thread_resume *resume_info);
118 static void stop_all_processes (void);
119 static int linux_wait_for_event (struct thread_info *child);
120 static int check_removed_breakpoint (struct process_info *event_child);
121 static void *add_process (unsigned long pid);
122
123 struct pending_signals
124 {
125 int signal;
126 siginfo_t info;
127 struct pending_signals *prev;
128 };
129
130 #define PTRACE_ARG3_TYPE long
131 #define PTRACE_XFER_TYPE long
132
133 #ifdef HAVE_LINUX_REGSETS
134 static char *disabled_regsets;
135 static int num_regsets;
136 #endif
137
138 #define pid_of(proc) ((proc)->head.id)
139
140 /* FIXME: Delete eventually. */
141 #define inferior_pid (pid_of (get_thread_process (current_inferior)))
142
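/* Handle an extended wait status (PTRACE_EVENT_*).  Only clone events are
   handled here: record the new LWP, deal with its initial SIGSTOP (or any
   signal that arrived first), and resume the thread that reported the
   event. */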
143 static void
144 handle_extended_wait (struct process_info *event_child, int wstat)
145 {
146 int event = wstat >> 16;
147 struct process_info *new_process;
148
149 if (event == PTRACE_EVENT_CLONE)
150 {
151 unsigned long new_pid;
152 int ret, status;
153
154 ptrace (PTRACE_GETEVENTMSG, inferior_pid, 0, &new_pid);
155
156 /* If we haven't already seen the new PID stop, wait for it now. */
157 if (! pull_pid_from_list (&stopped_pids, new_pid))
158 {
159 /* The new child has a pending SIGSTOP. We can't affect it until it
160 hits the SIGSTOP, but we're already attached. */
161
162 do {
163 ret = waitpid (new_pid, &status, __WALL);
164 } while (ret == -1 && errno == EINTR);
165
166 if (ret == -1)
167 perror_with_name ("waiting for new child");
168 else if (ret != new_pid)
169 warning ("wait returned unexpected PID %d", ret);
170 else if (!WIFSTOPPED (status))
171 warning ("wait returned unexpected status 0x%x", status);
172 }
173
174 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
175
176 new_process = (struct process_info *) add_process (new_pid);
177 add_thread (new_pid, new_process, new_pid);
178 new_thread_notify (thread_id_to_gdb_id (new_process->lwpid));
179
180 /* Normally we will get the pending SIGSTOP. But in some cases
181 we might get another signal delivered to the group first.
182 If we do, be sure not to lose it. */
183 if (WSTOPSIG (status) == SIGSTOP)
184 {
185 if (stopping_threads)
186 new_process->stopped = 1;
187 else
188 ptrace (PTRACE_CONT, new_pid, 0, 0);
189 }
190 else
191 {
192 new_process->stop_expected = 1;
193 if (stopping_threads)
194 {
195 new_process->stopped = 1;
196 new_process->status_pending_p = 1;
197 new_process->status_pending = status;
198 }
199 else
200 /* Pass the signal on. This is what GDB does - except
201 shouldn't we really report it instead? */
202 ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
203 }
204
205 /* Always resume the current thread. If we are stopping
206 threads, it will have a pending SIGSTOP; we may as well
207 collect it now. */
208 linux_resume_one_process (&event_child->head,
209 event_child->stepping, 0, NULL);
210 }
211 }
212
213 /* This function should only be called if the process got a SIGTRAP.
214 The SIGTRAP could mean several things.
215
216 On i386, where decr_pc_after_break is non-zero:
217 If we were single-stepping this process using PTRACE_SINGLESTEP,
218 we will get only the one SIGTRAP (even if the instruction we
219 stepped over was a breakpoint). The value of $eip will be the
220 next instruction.
221 If we continue the process using PTRACE_CONT, we will get a
222 SIGTRAP when we hit a breakpoint. The value of $eip will be
223 the instruction after the breakpoint (i.e. needs to be
224 decremented). If we report the SIGTRAP to GDB, we must also
225 report the undecremented PC. If we cancel the SIGTRAP, we
226 must resume at the decremented PC.
227
228 (Presumably, not yet tested) On a non-decr_pc_after_break machine
229 with hardware or kernel single-step:
230 If we single-step over a breakpoint instruction, our PC will
231 point at the following instruction. If we continue and hit a
232 breakpoint instruction, our PC will point at the breakpoint
233 instruction. */
234
235 static CORE_ADDR
236 get_stop_pc (void)
237 {
238 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
239
240 if (get_thread_process (current_inferior)->stepping)
241 return stop_pc;
242 else
243 return stop_pc - the_low_target.decr_pc_after_break;
244 }
245
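/* Allocate and zero a new process_info for PID and add it to the global
   process list. */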
246 static void *
247 add_process (unsigned long pid)
248 {
249 struct process_info *process;
250
251 process = (struct process_info *) malloc (sizeof (*process));
252 memset (process, 0, sizeof (*process));
253
254 process->head.id = pid;
255 process->lwpid = pid;
256
257 add_inferior_to_list (&all_processes, &process->head);
258
259 return process;
260 }
261
262 /* Start an inferior process and return its pid.
263 ALLARGS is a vector of program-name and args. */
264
265 static int
266 linux_create_inferior (char *program, char **allargs)
267 {
268 void *new_process;
269 int pid;
270
271 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
272 pid = vfork ();
273 #else
274 pid = fork ();
275 #endif
276 if (pid < 0)
277 perror_with_name ("fork");
278
279 if (pid == 0)
280 {
281 ptrace (PTRACE_TRACEME, 0, 0, 0);
282
283 signal (__SIGRTMIN + 1, SIG_DFL);
284
285 setpgid (0, 0);
286
287 execv (program, allargs);
288 if (errno == ENOENT)
289 execvp (program, allargs);
290
291 fprintf (stderr, "Cannot exec %s: %s.\n", program,
292 strerror (errno));
293 fflush (stderr);
294 _exit (0177);
295 }
296
297 new_process = add_process (pid);
298 add_thread (pid, new_process, pid);
299 must_set_ptrace_flags = 1;
300 new_inferior = 1;
301
302 return pid;
303 }
304
305 /* Attach to an inferior process. */
306
307 void
308 linux_attach_lwp (unsigned long pid)
309 {
310 struct process_info *new_process;
311
312 if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
313 {
314 if (all_threads.head != NULL)
315 {
316 /* If we fail to attach to an LWP, just warn. */
317 fprintf (stderr, "Cannot attach to process %ld: %s (%d)\n", pid,
318 strerror (errno), errno);
319 fflush (stderr);
320 return;
321 }
322 else
323 /* If we fail to attach to a process, report an error. */
324 error ("Cannot attach to process %ld: %s (%d)\n", pid,
325 strerror (errno), errno);
326 }
327
328 ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACECLONE);
329
330 new_process = (struct process_info *) add_process (pid);
331 add_thread (pid, new_process, pid);
332 new_thread_notify (thread_id_to_gdb_id (new_process->lwpid));
333
334 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
335 brings it to a halt. We should ignore that SIGSTOP and resume the process
336 (unless this is the first process, in which case the flag will be cleared
337 in linux_attach).
338
339 On the other hand, if we are currently trying to stop all threads, we
340 should treat the new thread as if we had sent it a SIGSTOP. This works
341 because we are guaranteed that add_process added us to the end of the
342 list, and so the new thread has not yet reached wait_for_sigstop (but
343 will). */
344 if (! stopping_threads)
345 new_process->stop_expected = 1;
346 }
347
348 int
349 linux_attach (unsigned long pid)
350 {
351 struct process_info *process;
352
353 linux_attach_lwp (pid);
354
355 /* Don't ignore the initial SIGSTOP if we just attached to this process.
356 It will be collected by wait shortly. */
357 process = (struct process_info *) find_inferior_id (&all_processes, pid);
358 process->stop_expected = 0;
359
360 new_inferior = 1;
361
362 return 0;
363 }
364
365 /* Kill the inferior process. Make us have no inferior. */
366
367 static void
368 linux_kill_one_process (struct inferior_list_entry *entry)
369 {
370 struct thread_info *thread = (struct thread_info *) entry;
371 struct process_info *process = get_thread_process (thread);
372 int wstat;
373
374 /* We avoid killing the first thread here, because of a Linux kernel (at
375 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
376 the children get a chance to be reaped, it will remain a zombie
377 forever. */
378 if (entry == all_threads.head)
379 return;
380
381 do
382 {
383 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
384
385 /* Make sure it died. The loop is most likely unnecessary. */
386 wstat = linux_wait_for_event (thread);
387 } while (WIFSTOPPED (wstat));
388 }
389
390 static void
391 linux_kill (void)
392 {
393 struct thread_info *thread = (struct thread_info *) all_threads.head;
394 struct process_info *process;
395 int wstat;
396
397 if (thread == NULL)
398 return;
399
400 for_each_inferior (&all_threads, linux_kill_one_process);
401
402 /* See the comment in linux_kill_one_process. We did not kill the first
403 thread in the list, so do so now. */
404 process = get_thread_process (thread);
405 do
406 {
407 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
408
409 /* Make sure it died. The loop is most likely unnecessary. */
410 wstat = linux_wait_for_event (thread);
411 } while (WIFSTOPPED (wstat));
412
413 clear_inferiors ();
414 free (all_processes.head);
415 all_processes.head = all_processes.tail = NULL;
416 }
417
418 static void
419 linux_detach_one_process (struct inferior_list_entry *entry)
420 {
421 struct thread_info *thread = (struct thread_info *) entry;
422 struct process_info *process = get_thread_process (thread);
423
424 /* Make sure the process isn't stopped at a breakpoint that's
425 no longer there. */
426 check_removed_breakpoint (process);
427
428 /* If this process is stopped but is expecting a SIGSTOP, then make
429 sure we take care of that now. This isn't absolutely guaranteed
430 to collect the SIGSTOP, but is fairly likely to. */
431 if (process->stop_expected)
432 {
433 /* Clear stop_expected, so that the SIGSTOP will be reported. */
434 process->stop_expected = 0;
435 if (process->stopped)
436 linux_resume_one_process (&process->head, 0, 0, NULL);
437 linux_wait_for_event (thread);
438 }
439
440 /* Flush any pending changes to the process's registers. */
441 regcache_invalidate_one ((struct inferior_list_entry *)
442 get_process_thread (process));
443
444 /* Finally, let it resume. */
445 ptrace (PTRACE_DETACH, pid_of (process), 0, 0);
446 }
447
448 static int
449 linux_detach (void)
450 {
451 delete_all_breakpoints ();
452 for_each_inferior (&all_threads, linux_detach_one_process);
453 clear_inferiors ();
454 free (all_processes.head);
455 all_processes.head = all_processes.tail = NULL;
456 return 0;
457 }
458
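/* Block until the process we originally spawned or attached to (signal_pid)
   has exited. */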
459 static void
460 linux_join (void)
461 {
462 extern unsigned long signal_pid;
463 int status, ret;
464
465 do {
466 ret = waitpid (signal_pid, &status, 0);
467 if (ret > 0 && (WIFEXITED (status) || WIFSIGNALED (status)))
468 break;
469 } while (ret != -1 || errno != ECHILD);
470 }
471
472 /* Return nonzero if the given thread is still alive. */
473 static int
474 linux_thread_alive (unsigned long lwpid)
475 {
476 if (find_inferior_id (&all_threads, lwpid) != NULL)
477 return 1;
478 else
479 return 0;
480 }
481
482 /* Return nonzero if this process stopped at a breakpoint which
483 no longer appears to be inserted. Also adjust the PC
484 appropriately to resume where the breakpoint used to be. */
485 static int
486 check_removed_breakpoint (struct process_info *event_child)
487 {
488 CORE_ADDR stop_pc;
489 struct thread_info *saved_inferior;
490
491 if (event_child->pending_is_breakpoint == 0)
492 return 0;
493
494 if (debug_threads)
495 fprintf (stderr, "Checking for breakpoint in process %ld.\n",
496 event_child->lwpid);
497
498 saved_inferior = current_inferior;
499 current_inferior = get_process_thread (event_child);
500
501 stop_pc = get_stop_pc ();
502
503 /* If the PC has changed since we stopped, then we shouldn't do
504 anything. This happens if, for instance, GDB handled the
505 decr_pc_after_break subtraction itself. */
506 if (stop_pc != event_child->pending_stop_pc)
507 {
508 if (debug_threads)
509 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
510 event_child->pending_stop_pc);
511
512 event_child->pending_is_breakpoint = 0;
513 current_inferior = saved_inferior;
514 return 0;
515 }
516
517 /* If the breakpoint is still there, we will report hitting it. */
518 if ((*the_low_target.breakpoint_at) (stop_pc))
519 {
520 if (debug_threads)
521 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
522 current_inferior = saved_inferior;
523 return 0;
524 }
525
526 if (debug_threads)
527 fprintf (stderr, "Removed breakpoint.\n");
528
529 /* For decr_pc_after_break targets, here is where we perform the
530 decrement. We go immediately from this function to resuming,
531 and can not safely call get_stop_pc () again. */
532 if (the_low_target.set_pc != NULL)
533 (*the_low_target.set_pc) (stop_pc);
534
535 /* We consumed the pending SIGTRAP. */
536 event_child->pending_is_breakpoint = 0;
537 event_child->status_pending_p = 0;
538 event_child->status_pending = 0;
539
540 current_inferior = saved_inferior;
541 return 1;
542 }
543
544 /* Return 1 if this process has an interesting status pending. This function
545 may silently resume an inferior process. */
546 static int
547 status_pending_p (struct inferior_list_entry *entry, void *dummy)
548 {
549 struct process_info *process = (struct process_info *) entry;
550
551 if (process->status_pending_p)
552 if (check_removed_breakpoint (process))
553 {
554 /* This thread was stopped at a breakpoint, and the breakpoint
555 is now gone. We were told to continue (or step...) all threads,
556 so GDB isn't trying to single-step past this breakpoint.
557 So instead of reporting the old SIGTRAP, pretend we got to
558 the breakpoint just after it was removed instead of just
559 before; resume the process. */
560 linux_resume_one_process (&process->head, 0, 0, NULL);
561 return 0;
562 }
563
564 return process->status_pending_p;
565 }
566
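/* Low-level wait.  If *CHILDP is non-NULL, wait for that particular LWP;
   otherwise wait for any of our processes.  Cloned LWPs must be waited for
   with __WCLONE, so both flavours of waitpid are polled.  On return *CHILDP
   points to the process that reported the event and *WSTATP holds its wait
   status. */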
567 static void
568 linux_wait_for_process (struct process_info **childp, int *wstatp)
569 {
570 int ret;
571 int to_wait_for = -1;
572
573 if (*childp != NULL)
574 to_wait_for = (*childp)->lwpid;
575
576 retry:
577 while (1)
578 {
579 ret = waitpid (to_wait_for, wstatp, WNOHANG);
580
581 if (ret == -1)
582 {
583 if (errno != ECHILD)
584 perror_with_name ("waitpid");
585 }
586 else if (ret > 0)
587 break;
588
589 ret = waitpid (to_wait_for, wstatp, WNOHANG | __WCLONE);
590
591 if (ret == -1)
592 {
593 if (errno != ECHILD)
594 perror_with_name ("waitpid (WCLONE)");
595 }
596 else if (ret > 0)
597 break;
598
599 usleep (1000);
600 }
601
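/* __SIGRTMIN and __SIGRTMIN + 1 (normally 32 and 33) are used internally by
   the thread library; do not clutter the debug output with them. */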
602 if (debug_threads
603 && (!WIFSTOPPED (*wstatp)
604 || (WSTOPSIG (*wstatp) != 32
605 && WSTOPSIG (*wstatp) != 33)))
606 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
607
608 if (to_wait_for == -1)
609 *childp = (struct process_info *) find_inferior_id (&all_processes, ret);
610
611 /* If we didn't find a process, one of two things presumably happened:
612 - A process we started and then detached from has exited. Ignore it.
613 - A process we are controlling has forked and the new child's stop
614 was reported to us by the kernel. Save its PID. */
615 if (*childp == NULL && WIFSTOPPED (*wstatp))
616 {
617 add_pid_to_list (&stopped_pids, ret);
618 goto retry;
619 }
620 else if (*childp == NULL)
621 goto retry;
622
623 (*childp)->stopped = 1;
624 (*childp)->pending_is_breakpoint = 0;
625
626 (*childp)->last_status = *wstatp;
627
628 /* Architecture-specific setup after inferior is running.
629 This needs to happen after we have attached to the inferior
630 and it is stopped for the first time, but before we access
631 any inferior registers. */
632 if (new_inferior)
633 {
634 the_low_target.arch_setup ();
635 #ifdef HAVE_LINUX_REGSETS
636 memset (disabled_regsets, 0, num_regsets);
637 #endif
638 new_inferior = 0;
639 }
640
641 if (debug_threads
642 && WIFSTOPPED (*wstatp))
643 {
644 current_inferior = (struct thread_info *)
645 find_inferior_id (&all_threads, (*childp)->lwpid);
646 /* For testing only; in debug mode the low target's get_pc prints a diagnostic. */
647 if (the_low_target.get_pc != NULL)
648 get_stop_pc ();
649 }
650 }
651
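/* Wait for an event from CHILD, or from any process if CHILD is NULL.
   Events that are handled internally (clone notifications, expected
   SIGSTOPs, pass-through signals, gdbserver's own breakpoints) cause the
   wait to be repeated; the first status worth reporting is returned. */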
652 static int
653 linux_wait_for_event (struct thread_info *child)
654 {
655 CORE_ADDR stop_pc;
656 struct process_info *event_child;
657 int wstat;
658 int bp_status;
659
660 /* Check for a process with a pending status. */
661 /* It is possible that the user changed the pending task's registers since
662 it stopped. We correctly handle the change of PC if we hit a breakpoint
663 (in check_removed_breakpoint); signals should be reported anyway. */
664 if (child == NULL)
665 {
666 event_child = (struct process_info *)
667 find_inferior (&all_processes, status_pending_p, NULL);
668 if (debug_threads && event_child)
669 fprintf (stderr, "Got a pending child %ld\n", event_child->lwpid);
670 }
671 else
672 {
673 event_child = get_thread_process (child);
674 if (event_child->status_pending_p
675 && check_removed_breakpoint (event_child))
676 event_child = NULL;
677 }
678
679 if (event_child != NULL)
680 {
681 if (event_child->status_pending_p)
682 {
683 if (debug_threads)
684 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
685 event_child->lwpid, event_child->status_pending);
686 wstat = event_child->status_pending;
687 event_child->status_pending_p = 0;
688 event_child->status_pending = 0;
689 current_inferior = get_process_thread (event_child);
690 return wstat;
691 }
692 }
693
694 /* We only enter this loop if no process has a pending wait status. Thus
695 any action taken in response to a wait status inside this loop is
696 responding as soon as we detect the status, not after any pending
697 events. */
698 while (1)
699 {
700 if (child == NULL)
701 event_child = NULL;
702 else
703 event_child = get_thread_process (child);
704
705 linux_wait_for_process (&event_child, &wstat);
706
707 if (event_child == NULL)
708 error ("event from unknown child");
709
710 current_inferior = (struct thread_info *)
711 find_inferior_id (&all_threads, event_child->lwpid);
712
713 /* Check for thread exit. */
714 if (! WIFSTOPPED (wstat))
715 {
716 if (debug_threads)
717 fprintf (stderr, "LWP %ld exiting\n", event_child->head.id);
718
719 /* If the last thread is exiting, just return. */
720 if (all_threads.head == all_threads.tail)
721 return wstat;
722
723 dead_thread_notify (thread_id_to_gdb_id (event_child->lwpid));
724
725 remove_inferior (&all_processes, &event_child->head);
726 free (event_child);
727 remove_thread (current_inferior);
728 current_inferior = (struct thread_info *) all_threads.head;
729
730 /* If we were waiting for this particular child to do something...
731 well, it did something. */
732 if (child != NULL)
733 return wstat;
734
735 /* Wait for a more interesting event. */
736 continue;
737 }
738
739 if (WIFSTOPPED (wstat)
740 && WSTOPSIG (wstat) == SIGSTOP
741 && event_child->stop_expected)
742 {
743 if (debug_threads)
744 fprintf (stderr, "Expected stop.\n");
745 event_child->stop_expected = 0;
746 linux_resume_one_process (&event_child->head,
747 event_child->stepping, 0, NULL);
748 continue;
749 }
750
751 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
752 && wstat >> 16 != 0)
753 {
754 handle_extended_wait (event_child, wstat);
755 continue;
756 }
757
758 /* If GDB is not interested in this signal, don't stop other
759 threads, and don't report it to GDB. Just resume the
760 inferior right away. We do this for threading-related
761 signals as well as any that GDB specifically requested we
762 ignore. But never ignore SIGSTOP if we sent it ourselves,
763 and do not ignore signals when stepping - they may require
764 special handling to skip the signal handler. */
765 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
766 thread library? */
767 if (WIFSTOPPED (wstat)
768 && !event_child->stepping
769 && (
770 #ifdef USE_THREAD_DB
771 (thread_db_active && (WSTOPSIG (wstat) == __SIGRTMIN
772 || WSTOPSIG (wstat) == __SIGRTMIN + 1))
773 ||
774 #endif
775 (pass_signals[target_signal_from_host (WSTOPSIG (wstat))]
776 && (WSTOPSIG (wstat) != SIGSTOP || !stopping_threads))))
777 {
778 siginfo_t info, *info_p;
779
780 if (debug_threads)
781 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
782 WSTOPSIG (wstat), event_child->head.id);
783
784 if (ptrace (PTRACE_GETSIGINFO, event_child->lwpid, 0, &info) == 0)
785 info_p = &info;
786 else
787 info_p = NULL;
788 linux_resume_one_process (&event_child->head,
789 event_child->stepping,
790 WSTOPSIG (wstat), info_p);
791 continue;
792 }
793
794 /* If this event was not handled above, and is not a SIGTRAP, report
795 it. */
796 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGTRAP)
797 return wstat;
798
799 /* If this target does not support breakpoints, we simply report the
800 SIGTRAP; it's of no concern to us. */
801 if (the_low_target.get_pc == NULL)
802 return wstat;
803
804 stop_pc = get_stop_pc ();
805
806 /* bp_reinsert will only be set if we were single-stepping.
807 Notice that we will resume the process after hitting
808 a gdbserver breakpoint; single-stepping to/over one
809 is not supported (yet). */
810 if (event_child->bp_reinsert != 0)
811 {
812 if (debug_threads)
813 fprintf (stderr, "Reinserted breakpoint.\n");
814 reinsert_breakpoint (event_child->bp_reinsert);
815 event_child->bp_reinsert = 0;
816
817 /* Clear the single-stepping flag and SIGTRAP as we resume. */
818 linux_resume_one_process (&event_child->head, 0, 0, NULL);
819 continue;
820 }
821
822 bp_status = check_breakpoints (stop_pc);
823
824 if (bp_status != 0)
825 {
826 if (debug_threads)
827 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
828
829 /* We hit one of our own breakpoints. We mark it as a pending
830 breakpoint, so that check_removed_breakpoint () will do the PC
831 adjustment for us at the appropriate time. */
832 event_child->pending_is_breakpoint = 1;
833 event_child->pending_stop_pc = stop_pc;
834
835 /* We may need to put the breakpoint back. We continue in the event
836 loop instead of simply replacing the breakpoint right away,
837 in order to not lose signals sent to the thread that hit the
838 breakpoint. Unfortunately this increases the window where another
839 thread could sneak past the removed breakpoint. For the current
840 use of server-side breakpoints (thread creation) this is
841 acceptable; but it needs to be considered before this breakpoint
842 mechanism can be used in more general ways. For some breakpoints
843 it may be necessary to stop all other threads, but that should
844 be avoided where possible.
845
846 If breakpoint_reinsert_addr is NULL, that means that we can
847 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
848 mark it for reinsertion, and single-step.
849
850 Otherwise, call the target function to figure out where we need
851 our temporary breakpoint, create it, and continue executing this
852 process. */
853 if (bp_status == 2)
854 /* No need to reinsert. */
855 linux_resume_one_process (&event_child->head, 0, 0, NULL);
856 else if (the_low_target.breakpoint_reinsert_addr == NULL)
857 {
858 event_child->bp_reinsert = stop_pc;
859 uninsert_breakpoint (stop_pc);
860 linux_resume_one_process (&event_child->head, 1, 0, NULL);
861 }
862 else
863 {
864 reinsert_breakpoint_by_bp
865 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
866 linux_resume_one_process (&event_child->head, 0, 0, NULL);
867 }
868
869 continue;
870 }
871
872 if (debug_threads)
873 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
874
875 /* If we were single-stepping, we definitely want to report the
876 SIGTRAP. The single-step operation has completed, so also
877 clear the stepping flag; in general this does not matter,
878 because the SIGTRAP will be reported to the client, which
879 will give us a new action for this thread, but clear it for
880 consistency anyway. It's safe to clear the stepping flag
881 because the only consumer of get_stop_pc () after this point
882 is check_removed_breakpoint, and pending_is_breakpoint is not
883 set. It might be wiser to use a step_completed flag instead. */
884 if (event_child->stepping)
885 {
886 event_child->stepping = 0;
887 return wstat;
888 }
889
890 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
891 Check if it is a breakpoint, and if so mark the process information
892 accordingly. This will handle both the necessary fiddling with the
893 PC on decr_pc_after_break targets and suppressing extra threads
894 hitting a breakpoint if two hit it at once and then GDB removes it
895 after the first is reported. Arguably it would be better to report
896 multiple threads hitting breakpoints simultaneously, but the current
897 remote protocol does not allow this. */
898 if ((*the_low_target.breakpoint_at) (stop_pc))
899 {
900 event_child->pending_is_breakpoint = 1;
901 event_child->pending_stop_pc = stop_pc;
902 }
903
904 return wstat;
905 }
906
907 /* NOTREACHED */
908 return 0;
909 }
910
911 /* Wait for the inferior to stop or exit; return its status.  *STATUS is set to 'W', 'X' or 'T' to indicate the kind of event. */
912
913 static unsigned char
914 linux_wait (char *status)
915 {
916 int w;
917 struct thread_info *child = NULL;
918
919 retry:
920 /* If we were only supposed to resume one thread, only wait for
921 that thread - if it's still alive. If it died, however - which
922 can happen if we're coming from the thread death case below -
923 then we need to make sure we restart the other threads. We could
924 pick a thread at random or restart all; restarting all is less
925 arbitrary. */
926 if (cont_thread != 0 && cont_thread != -1)
927 {
928 child = (struct thread_info *) find_inferior_id (&all_threads,
929 cont_thread);
930
931 /* No stepping, no signal - unless one is pending already, of course. */
932 if (child == NULL)
933 {
934 struct thread_resume resume_info;
935 resume_info.thread = -1;
936 resume_info.step = resume_info.sig = resume_info.leave_stopped = 0;
937 linux_resume (&resume_info);
938 }
939 }
940
941 w = linux_wait_for_event (child);
942 stop_all_processes ();
943
944 if (must_set_ptrace_flags)
945 {
946 ptrace (PTRACE_SETOPTIONS, inferior_pid, 0, PTRACE_O_TRACECLONE);
947 must_set_ptrace_flags = 0;
948 }
949
950 /* If we are waiting for a particular child, and it exited,
951 linux_wait_for_event will return its exit status. Similarly if
952 the last child exited. If this is not the last child, however,
953 do not report it as exited until there is a 'thread exited' response
954 available in the remote protocol. Instead, just wait for another event.
955 This should be safe, because if the thread crashed we will already
956 have reported the termination signal to GDB; that should stop any
957 in-progress stepping operations, etc.
958
959 Report the exit status of the last thread to exit. This matches
960 LinuxThreads' behavior. */
961
962 if (all_threads.head == all_threads.tail)
963 {
964 if (WIFEXITED (w))
965 {
966 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
967 *status = 'W';
968 clear_inferiors ();
969 free (all_processes.head);
970 all_processes.head = all_processes.tail = NULL;
971 return WEXITSTATUS (w);
972 }
973 else if (!WIFSTOPPED (w))
974 {
975 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
976 *status = 'X';
977 clear_inferiors ();
978 free (all_processes.head);
979 all_processes.head = all_processes.tail = NULL;
980 return target_signal_from_host (WTERMSIG (w));
981 }
982 }
983 else
984 {
985 if (!WIFSTOPPED (w))
986 goto retry;
987 }
988
989 *status = 'T';
990 return target_signal_from_host (WSTOPSIG (w));
991 }
992
993 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
994 thread groups are in use, we need to use tkill. */
995
996 static int
997 kill_lwp (unsigned long lwpid, int signo)
998 {
999 static int tkill_failed;
1000
1001 errno = 0;
1002
1003 #ifdef SYS_tkill
1004 if (!tkill_failed)
1005 {
1006 int ret = syscall (SYS_tkill, lwpid, signo);
1007 if (errno != ENOSYS)
1008 return ret;
1009 errno = 0;
1010 tkill_failed = 1;
1011 }
1012 #endif
1013
1014 return kill (lwpid, signo);
1015 }
1016
1017 static void
1018 send_sigstop (struct inferior_list_entry *entry)
1019 {
1020 struct process_info *process = (struct process_info *) entry;
1021
1022 if (process->stopped)
1023 return;
1024
1025 /* If we already have a pending stop signal for this process, don't
1026 send another. */
1027 if (process->stop_expected)
1028 {
1029 if (debug_threads)
1030 fprintf (stderr, "Have pending sigstop for process %ld\n",
1031 process->lwpid);
1032
1033 /* We clear the stop_expected flag so that wait_for_sigstop
1034 will receive the SIGSTOP event (instead of silently resuming and
1035 waiting again). It'll be reset below. */
1036 process->stop_expected = 0;
1037 return;
1038 }
1039
1040 if (debug_threads)
1041 fprintf (stderr, "Sending sigstop to process %ld\n", process->head.id);
1042
1043 kill_lwp (process->head.id, SIGSTOP);
1044 }
1045
1046 static void
1047 wait_for_sigstop (struct inferior_list_entry *entry)
1048 {
1049 struct process_info *process = (struct process_info *) entry;
1050 struct thread_info *saved_inferior, *thread;
1051 int wstat;
1052 unsigned long saved_tid;
1053
1054 if (process->stopped)
1055 return;
1056
1057 saved_inferior = current_inferior;
1058 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1059 thread = (struct thread_info *) find_inferior_id (&all_threads,
1060 process->lwpid);
1061 wstat = linux_wait_for_event (thread);
1062
1063 /* If we stopped with a non-SIGSTOP signal, save it for later
1064 and record the pending SIGSTOP. If the process exited, just
1065 return. */
1066 if (WIFSTOPPED (wstat)
1067 && WSTOPSIG (wstat) != SIGSTOP)
1068 {
1069 if (debug_threads)
1070 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1071 process->lwpid, wstat);
1072 process->status_pending_p = 1;
1073 process->status_pending = wstat;
1074 process->stop_expected = 1;
1075 }
1076
1077 if (linux_thread_alive (saved_tid))
1078 current_inferior = saved_inferior;
1079 else
1080 {
1081 if (debug_threads)
1082 fprintf (stderr, "Previously current thread died.\n");
1083
1084 /* Set a valid thread as current. */
1085 set_desired_inferior (0);
1086 }
1087 }
1088
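/* Send SIGSTOP to every process we know about and wait until each one has
   reported a stop.  stopping_threads is set for the duration so that newly
   cloned children are held stopped as well. */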
1089 static void
1090 stop_all_processes (void)
1091 {
1092 stopping_threads = 1;
1093 for_each_inferior (&all_processes, send_sigstop);
1094 for_each_inferior (&all_processes, wait_for_sigstop);
1095 stopping_threads = 0;
1096 }
1097
1098 /* Resume execution of the inferior process.
1099 If STEP is nonzero, single-step it.
1100 If SIGNAL is nonzero, give it that signal. */
1101
1102 static void
1103 linux_resume_one_process (struct inferior_list_entry *entry,
1104 int step, int signal, siginfo_t *info)
1105 {
1106 struct process_info *process = (struct process_info *) entry;
1107 struct thread_info *saved_inferior;
1108
1109 if (process->stopped == 0)
1110 return;
1111
1112 /* If we have pending signals or status, and a new signal, enqueue the
1113 signal. Also enqueue the signal if we are waiting to reinsert a
1114 breakpoint; it will be picked up again below. */
1115 if (signal != 0
1116 && (process->status_pending_p || process->pending_signals != NULL
1117 || process->bp_reinsert != 0))
1118 {
1119 struct pending_signals *p_sig;
1120 p_sig = malloc (sizeof (*p_sig));
1121 p_sig->prev = process->pending_signals;
1122 p_sig->signal = signal;
1123 if (info == NULL)
1124 memset (&p_sig->info, 0, sizeof (siginfo_t));
1125 else
1126 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1127 process->pending_signals = p_sig;
1128 }
1129
1130 if (process->status_pending_p && !check_removed_breakpoint (process))
1131 return;
1132
1133 saved_inferior = current_inferior;
1134 current_inferior = get_process_thread (process);
1135
1136 if (debug_threads)
1137 fprintf (stderr, "Resuming process %ld (%s, signal %d, stop %s)\n", inferior_pid,
1138 step ? "step" : "continue", signal,
1139 process->stop_expected ? "expected" : "not expected");
1140
1141 /* This bit needs some thinking about. If we get a signal that
1142 we must report while a single-step reinsert is still pending,
1143 we often end up resuming the thread. It might be better to
1144 (ew) allow a stack of pending events; then we could be sure that
1145 the reinsert happened right away and not lose any signals.
1146
1147 Making this stack would also shrink the window in which breakpoints are
1148 uninserted (see comment in linux_wait_for_process) but not enough for
1149 complete correctness, so it won't solve that problem. It may be
1150 worthwhile just to solve this one, however. */
1151 if (process->bp_reinsert != 0)
1152 {
1153 if (debug_threads)
1154 fprintf (stderr, " pending reinsert at %08lx", (long)process->bp_reinsert);
1155 if (step == 0)
1156 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1157 step = 1;
1158
1159 /* Postpone any pending signal. It was enqueued above. */
1160 signal = 0;
1161 }
1162
1163 check_removed_breakpoint (process);
1164
1165 if (debug_threads && the_low_target.get_pc != NULL)
1166 {
1167 fprintf (stderr, " ");
1168 (*the_low_target.get_pc) ();
1169 }
1170
1171 /* If we have pending signals, consume one unless we are trying to reinsert
1172 a breakpoint. */
1173 if (process->pending_signals != NULL && process->bp_reinsert == 0)
1174 {
1175 struct pending_signals **p_sig;
1176
1177 p_sig = &process->pending_signals;
1178 while ((*p_sig)->prev != NULL)
1179 p_sig = &(*p_sig)->prev;
1180
1181 signal = (*p_sig)->signal;
1182 if ((*p_sig)->info.si_signo != 0)
1183 ptrace (PTRACE_SETSIGINFO, process->lwpid, 0, &(*p_sig)->info);
1184
1185 free (*p_sig);
1186 *p_sig = NULL;
1187 }
1188
1189 regcache_invalidate_one ((struct inferior_list_entry *)
1190 get_process_thread (process));
1191 errno = 0;
1192 process->stopped = 0;
1193 process->stepping = step;
1194 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, process->lwpid, 0, signal);
1195
1196 current_inferior = saved_inferior;
1197 if (errno)
1198 perror_with_name ("ptrace");
1199 }
1200
1201 static struct thread_resume *resume_ptr;
1202
1203 /* This function is called once per thread. We look up the thread
1204 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1205 resume request.
1206
1207 This algorithm is O(threads * resume elements), but resume elements
1208 is small (and will remain small at least until GDB supports thread
1209 suspension). */
1210 static void
1211 linux_set_resume_request (struct inferior_list_entry *entry)
1212 {
1213 struct process_info *process;
1214 struct thread_info *thread;
1215 int ndx;
1216
1217 thread = (struct thread_info *) entry;
1218 process = get_thread_process (thread);
1219
1220 ndx = 0;
1221 while (resume_ptr[ndx].thread != -1 && resume_ptr[ndx].thread != entry->id)
1222 ndx++;
1223
1224 process->resume = &resume_ptr[ndx];
1225 }
1226
1227 /* This function is called once per thread. We check the thread's resume
1228 request, which will tell us whether to resume, step, or leave the thread
1229 stopped; and what signal, if any, it should be sent. For threads which
1230 we aren't explicitly told otherwise, we preserve the stepping flag; this
1231 is used for stepping over gdbserver-placed breakpoints. */
1232
1233 static void
1234 linux_continue_one_thread (struct inferior_list_entry *entry)
1235 {
1236 struct process_info *process;
1237 struct thread_info *thread;
1238 int step;
1239
1240 thread = (struct thread_info *) entry;
1241 process = get_thread_process (thread);
1242
1243 if (process->resume->leave_stopped)
1244 return;
1245
1246 if (process->resume->thread == -1)
1247 step = process->stepping || process->resume->step;
1248 else
1249 step = process->resume->step;
1250
1251 linux_resume_one_process (&process->head, step, process->resume->sig, NULL);
1252
1253 process->resume = NULL;
1254 }
1255
1256 /* This function is called once per thread. We check the thread's resume
1257 request, which will tell us whether to resume, step, or leave the thread
1258 stopped; and what signal, if any, it should be sent. We queue any needed
1259 signals, since we won't actually resume. We already have a pending event
1260 to report, so we don't need to preserve any step requests; they should
1261 be re-issued if necessary. */
1262
1263 static void
1264 linux_queue_one_thread (struct inferior_list_entry *entry)
1265 {
1266 struct process_info *process;
1267 struct thread_info *thread;
1268
1269 thread = (struct thread_info *) entry;
1270 process = get_thread_process (thread);
1271
1272 if (process->resume->leave_stopped)
1273 return;
1274
1275 /* If we have a new signal, enqueue the signal. */
1276 if (process->resume->sig != 0)
1277 {
1278 struct pending_signals *p_sig;
1279 p_sig = malloc (sizeof (*p_sig));
1280 p_sig->prev = process->pending_signals;
1281 p_sig->signal = process->resume->sig;
1282 memset (&p_sig->info, 0, sizeof (siginfo_t));
1283
1284 /* If this is the same signal we were previously stopped by,
1285 make sure to queue its siginfo. We can ignore the return
1286 value of ptrace; if it fails, we'll skip
1287 PTRACE_SETSIGINFO. */
1288 if (WIFSTOPPED (process->last_status)
1289 && WSTOPSIG (process->last_status) == process->resume->sig)
1290 ptrace (PTRACE_GETSIGINFO, process->lwpid, 0, &p_sig->info);
1291
1292 process->pending_signals = p_sig;
1293 }
1294
1295 process->resume = NULL;
1296 }
1297
1298 /* Set the flag pointed to by FLAG_P if this process has an interesting status pending. */
1299 static int
1300 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1301 {
1302 struct process_info *process = (struct process_info *) entry;
1303
1304 /* Processes which will not be resumed are not interesting, because
1305 we might not wait for them next time through linux_wait. */
1306 if (process->resume->leave_stopped)
1307 return 0;
1308
1309 /* If this thread has a removed breakpoint, we won't have any
1310 events to report later, so check now. check_removed_breakpoint
1311 may clear status_pending_p. We avoid calling check_removed_breakpoint
1312 for any thread that we are not otherwise going to resume - this
1313 lets us preserve stopped status when two threads hit a breakpoint.
1314 GDB removes the breakpoint to single-step a particular thread
1315 past it, then re-inserts it and resumes all threads. We want
1316 to report the second thread without resuming it in the interim. */
1317 if (process->status_pending_p)
1318 check_removed_breakpoint (process);
1319
1320 if (process->status_pending_p)
1321 * (int *) flag_p = 1;
1322
1323 return 0;
1324 }
1325
1326 static void
1327 linux_resume (struct thread_resume *resume_info)
1328 {
1329 int pending_flag;
1330
1331 /* Yes, the use of a global here is rather ugly. */
1332 resume_ptr = resume_info;
1333
1334 for_each_inferior (&all_threads, linux_set_resume_request);
1335
1336 /* If there is a thread which would otherwise be resumed, which
1337 has a pending status, then don't resume any threads - we can just
1338 report the pending status. Make sure to queue any signals
1339 that would otherwise be sent. */
1340 pending_flag = 0;
1341 find_inferior (&all_processes, resume_status_pending_p, &pending_flag);
1342
1343 if (debug_threads)
1344 {
1345 if (pending_flag)
1346 fprintf (stderr, "Not resuming, pending status\n");
1347 else
1348 fprintf (stderr, "Resuming, no pending status\n");
1349 }
1350
1351 if (pending_flag)
1352 for_each_inferior (&all_threads, linux_queue_one_thread);
1353 else
1354 for_each_inferior (&all_threads, linux_continue_one_thread);
1355 }
1356
1357 #ifdef HAVE_LINUX_USRREGS
1358
1359 int
1360 register_addr (int regnum)
1361 {
1362 int addr;
1363
1364 if (regnum < 0 || regnum >= the_low_target.num_regs)
1365 error ("Invalid register number %d.", regnum);
1366
1367 addr = the_low_target.regmap[regnum];
1368
1369 return addr;
1370 }
1371
1372 /* Fetch one register. */
1373 static void
1374 fetch_register (int regno)
1375 {
1376 CORE_ADDR regaddr;
1377 int i, size;
1378 char *buf;
1379
1380 if (regno >= the_low_target.num_regs)
1381 return;
1382 if ((*the_low_target.cannot_fetch_register) (regno))
1383 return;
1384
1385 regaddr = register_addr (regno);
1386 if (regaddr == -1)
1387 return;
1388 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1389 & - sizeof (PTRACE_XFER_TYPE);
1390 buf = alloca (size);
1391 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1392 {
1393 errno = 0;
1394 *(PTRACE_XFER_TYPE *) (buf + i) =
1395 ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1396 regaddr += sizeof (PTRACE_XFER_TYPE);
1397 if (errno != 0)
1398 {
1399 /* Warning, not error, in case we are attached; sometimes the
1400 kernel doesn't let us at the registers. */
1401 char *err = strerror (errno);
1402 char *msg = alloca (strlen (err) + 128);
1403 sprintf (msg, "reading register %d: %s", regno, err);
1404 error (msg);
1405 goto error_exit;
1406 }
1407 }
1408
1409 if (the_low_target.supply_ptrace_register)
1410 the_low_target.supply_ptrace_register (regno, buf);
1411 else
1412 supply_register (regno, buf);
1413
1414 error_exit:;
1415 }
1416
1417 /* Fetch all registers, or just one, from the child process. */
1418 static void
1419 usr_fetch_inferior_registers (int regno)
1420 {
1421 if (regno == -1 || regno == 0)
1422 for (regno = 0; regno < the_low_target.num_regs; regno++)
1423 fetch_register (regno);
1424 else
1425 fetch_register (regno);
1426 }
1427
1428 /* Store our register values back into the inferior.
1429 If REGNO is -1, do this for all registers.
1430 Otherwise, REGNO specifies which register (so we can save time). */
1431 static void
1432 usr_store_inferior_registers (int regno)
1433 {
1434 CORE_ADDR regaddr;
1435 int i, size;
1436 char *buf;
1437
1438 if (regno >= 0)
1439 {
1440 if (regno >= the_low_target.num_regs)
1441 return;
1442
1443 if ((*the_low_target.cannot_store_register) (regno) == 1)
1444 return;
1445
1446 regaddr = register_addr (regno);
1447 if (regaddr == -1)
1448 return;
1449 errno = 0;
1450 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1451 & - sizeof (PTRACE_XFER_TYPE);
1452 buf = alloca (size);
1453 memset (buf, 0, size);
1454
1455 if (the_low_target.collect_ptrace_register)
1456 the_low_target.collect_ptrace_register (regno, buf);
1457 else
1458 collect_register (regno, buf);
1459
1460 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1461 {
1462 errno = 0;
1463 ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
1464 *(PTRACE_XFER_TYPE *) (buf + i));
1465 if (errno != 0)
1466 {
1467 if ((*the_low_target.cannot_store_register) (regno) == 0)
1468 {
1469 char *err = strerror (errno);
1470 char *msg = alloca (strlen (err) + 128);
1471 sprintf (msg, "writing register %d: %s",
1472 regno, err);
1473 error (msg);
1474 return;
1475 }
1476 }
1477 regaddr += sizeof (PTRACE_XFER_TYPE);
1478 }
1479 }
1480 else
1481 for (regno = 0; regno < the_low_target.num_regs; regno++)
1482 usr_store_inferior_registers (regno);
1483 }
1484 #endif /* HAVE_LINUX_USRREGS */
1485
1486
1487
1488 #ifdef HAVE_LINUX_REGSETS
1489
1490 static int
1491 regsets_fetch_inferior_registers ()
1492 {
1493 struct regset_info *regset;
1494 int saw_general_regs = 0;
1495
1496 regset = target_regsets;
1497
1498 while (regset->size >= 0)
1499 {
1500 void *buf;
1501 int res;
1502
1503 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1504 {
1505 regset ++;
1506 continue;
1507 }
1508
1509 buf = malloc (regset->size);
1510 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1511 if (res < 0)
1512 {
1513 if (errno == EIO)
1514 {
1515 /* If we get EIO on a regset, do not try it again for
1516 this process. */
1517 disabled_regsets[regset - target_regsets] = 1;
free (buf);
1518 continue;
1519 }
1520 else
1521 {
1522 char s[256];
1523 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
1524 inferior_pid);
1525 perror (s);
1526 }
1527 }
1528 else if (regset->type == GENERAL_REGS)
1529 saw_general_regs = 1;
1530 regset->store_function (buf);
free (buf);
1531 regset ++;
1532 }
1533 if (saw_general_regs)
1534 return 0;
1535 else
1536 return 1;
1537 }
1538
1539 static int
1540 regsets_store_inferior_registers ()
1541 {
1542 struct regset_info *regset;
1543 int saw_general_regs = 0;
1544
1545 regset = target_regsets;
1546
1547 while (regset->size >= 0)
1548 {
1549 void *buf;
1550 int res;
1551
1552 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1553 {
1554 regset ++;
1555 continue;
1556 }
1557
1558 buf = malloc (regset->size);
1559
1560 /* First fill the buffer with the current register set contents,
1561 in case there are any items in the kernel's regset that are
1562 not in gdbserver's regcache. */
1563 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1564
1565 if (res == 0)
1566 {
1567 /* Then overlay our cached registers on that. */
1568 regset->fill_function (buf);
1569
1570 /* Only now do we write the register set. */
1571 res = ptrace (regset->set_request, inferior_pid, 0, buf);
1572 }
1573
1574 if (res < 0)
1575 {
1576 if (errno == EIO)
1577 {
1578 /* If we get EIO on a regset, do not try it again for
1579 this process. */
1580 disabled_regsets[regset - target_regsets] = 1;
free (buf);
1581 continue;
1582 }
1583 else
1584 {
1585 perror ("Warning: ptrace(regsets_store_inferior_registers)");
1586 }
1587 }
1588 else if (regset->type == GENERAL_REGS)
1589 saw_general_regs = 1;
1590 regset ++;
1591 free (buf);
1592 }
1593 if (saw_general_regs)
1594 return 0;
1595 else
1596 return 1;
1598 }
1599
1600 #endif /* HAVE_LINUX_REGSETS */
1601
1602
1603 void
1604 linux_fetch_registers (int regno)
1605 {
1606 #ifdef HAVE_LINUX_REGSETS
1607 if (regsets_fetch_inferior_registers () == 0)
1608 return;
1609 #endif
1610 #ifdef HAVE_LINUX_USRREGS
1611 usr_fetch_inferior_registers (regno);
1612 #endif
1613 }
1614
1615 void
1616 linux_store_registers (int regno)
1617 {
1618 #ifdef HAVE_LINUX_REGSETS
1619 if (regsets_store_inferior_registers () == 0)
1620 return;
1621 #endif
1622 #ifdef HAVE_LINUX_USRREGS
1623 usr_store_inferior_registers (regno);
1624 #endif
1625 }
1626
1627
1628 /* Copy LEN bytes from inferior's memory starting at MEMADDR
1629 to debugger memory starting at MYADDR. */
1630
1631 static int
1632 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
1633 {
1634 register int i;
1635 /* Round starting address down to longword boundary. */
1636 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1637 /* Round ending address up; get number of longwords that makes. */
1638 register int count
1639 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
1640 / sizeof (PTRACE_XFER_TYPE);
1641 /* Allocate buffer of that many longwords. */
1642 register PTRACE_XFER_TYPE *buffer
1643 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1644 int fd;
1645 char filename[64];
1646
1647 /* Try using /proc.  Don't bother for small transfers (less than three words). */
1648 if (len >= 3 * sizeof (long))
1649 {
1650 /* We could keep this file open and cache it - possibly one per
1651 thread. That requires some juggling, but is even faster. */
1652 sprintf (filename, "/proc/%ld/mem", inferior_pid);
1653 fd = open (filename, O_RDONLY | O_LARGEFILE);
1654 if (fd == -1)
1655 goto no_proc;
1656
1657 /* If pread64 is available, use it. It's faster if the kernel
1658 supports it (only one syscall), and it's 64-bit safe even on
1659 32-bit platforms (for instance, SPARC debugging a SPARC64
1660 application). */
1661 #ifdef HAVE_PREAD64
1662 if (pread64 (fd, myaddr, len, memaddr) != len)
1663 #else
1664 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
1665 #endif
1666 {
1667 close (fd);
1668 goto no_proc;
1669 }
1670
1671 close (fd);
1672 return 0;
1673 }
1674
1675 no_proc:
1676 /* Read all the longwords */
1677 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1678 {
1679 errno = 0;
1680 buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, 0);
1681 if (errno)
1682 return errno;
1683 }
1684
1685 /* Copy appropriate bytes out of the buffer. */
1686 memcpy (myaddr, (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), len);
1687
1688 return 0;
1689 }
1690
1691 /* Copy LEN bytes of data from debugger memory at MYADDR
1692 to inferior's memory at MEMADDR.
1693 On failure (cannot write the inferior)
1694 returns the value of errno. */
1695
1696 static int
1697 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
1698 {
1699 register int i;
1700 /* Round starting address down to longword boundary. */
1701 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1702 /* Round ending address up; get number of longwords that makes. */
1703 register int count
1704 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
1705 /* Allocate buffer of that many longwords. */
1706 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1708
1709 if (debug_threads)
1710 {
1711 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
1712 }
1713
1714 /* Fill start and end extra bytes of buffer with existing memory data. */
1715
1716 buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1717 (PTRACE_ARG3_TYPE) addr, 0);
1718
1719 if (count > 1)
1720 {
1721 buffer[count - 1]
1722 = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1723 (PTRACE_ARG3_TYPE) (addr + (count - 1)
1724 * sizeof (PTRACE_XFER_TYPE)),
1725 0);
1726 }
1727
1728 /* Copy data to be written over corresponding part of buffer */
1729
1730 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
1731
1732 /* Write the entire buffer. */
1733
1734 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1735 {
1736 errno = 0;
1737 ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
1738 if (errno)
1739 return errno;
1740 }
1741
1742 return 0;
1743 }
1744
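/* Nonzero if PTRACE_O_TRACEFORK was verified to work by
   linux_test_for_tracefork. */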
1745 static int linux_supports_tracefork_flag;
1746
1747 /* Helper functions for linux_test_for_tracefork, called via clone (). */
1748
1749 static int
1750 linux_tracefork_grandchild (void *arg)
1751 {
1752 _exit (0);
1753 }
1754
1755 #define STACK_SIZE 4096
1756
1757 static int
1758 linux_tracefork_child (void *arg)
1759 {
1760 ptrace (PTRACE_TRACEME, 0, 0, 0);
1761 kill (getpid (), SIGSTOP);
1762 #ifdef __ia64__
1763 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
1764 CLONE_VM | SIGCHLD, NULL);
1765 #else
1766 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
1767 CLONE_VM | SIGCHLD, NULL);
1768 #endif
1769 _exit (0);
1770 }
1771
1772 /* Wrapper function for waitpid which handles EINTR. */
1773
1774 static int
1775 my_waitpid (int pid, int *status, int flags)
1776 {
1777 int ret;
1778 do
1779 {
1780 ret = waitpid (pid, status, flags);
1781 }
1782 while (ret == -1 && errno == EINTR);
1783
1784 return ret;
1785 }
1786
1787 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
1788 sure that we can enable the option, and that it had the desired
1789 effect. */
1790
1791 static void
1792 linux_test_for_tracefork (void)
1793 {
1794 int child_pid, ret, status;
1795 long second_pid;
1796 char *stack = malloc (STACK_SIZE * 4);
1797
1798 linux_supports_tracefork_flag = 0;
1799
1800 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
1801 #ifdef __ia64__
1802 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
1803 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1804 #else
1805 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
1806 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1807 #endif
1808 if (child_pid == -1)
1809 perror_with_name ("clone");
1810
1811 ret = my_waitpid (child_pid, &status, 0);
1812 if (ret == -1)
1813 perror_with_name ("waitpid");
1814 else if (ret != child_pid)
1815 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
1816 if (! WIFSTOPPED (status))
1817 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
1818
1819 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
1820 if (ret != 0)
1821 {
1822 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1823 if (ret != 0)
1824 {
1825 warning ("linux_test_for_tracefork: failed to kill child");
1826 return;
1827 }
1828
1829 ret = my_waitpid (child_pid, &status, 0);
1830 if (ret != child_pid)
1831 warning ("linux_test_for_tracefork: failed to wait for killed child");
1832 else if (!WIFSIGNALED (status))
1833 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
1834 "killed child", status);
1835
1836 return;
1837 }
1838
1839 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
1840 if (ret != 0)
1841 warning ("linux_test_for_tracefork: failed to resume child");
1842
1843 ret = my_waitpid (child_pid, &status, 0);
1844
1845 if (ret == child_pid && WIFSTOPPED (status)
1846 && status >> 16 == PTRACE_EVENT_FORK)
1847 {
1848 second_pid = 0;
1849 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
1850 if (ret == 0 && second_pid != 0)
1851 {
1852 int second_status;
1853
1854 linux_supports_tracefork_flag = 1;
1855 my_waitpid (second_pid, &second_status, 0);
1856 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
1857 if (ret != 0)
1858 warning ("linux_test_for_tracefork: failed to kill second child");
1859 my_waitpid (second_pid, &status, 0);
1860 }
1861 }
1862 else
1863 warning ("linux_test_for_tracefork: unexpected result from waitpid "
1864 "(%d, status 0x%x)", ret, status);
1865
1866 do
1867 {
1868 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1869 if (ret != 0)
1870 warning ("linux_test_for_tracefork: failed to kill child");
1871 my_waitpid (child_pid, &status, 0);
1872 }
1873 while (WIFSTOPPED (status));
1874
1875 free (stack);
1876 }
1877
1878
1879 static void
1880 linux_look_up_symbols (void)
1881 {
1882 #ifdef USE_THREAD_DB
1883 if (thread_db_active)
1884 return;
1885
1886 thread_db_active = thread_db_init (!linux_supports_tracefork_flag);
1887 #endif
1888 }
1889
1890 static void
1891 linux_request_interrupt (void)
1892 {
1893 extern unsigned long signal_pid;
1894
1895 if (cont_thread != 0 && cont_thread != -1)
1896 {
1897 struct process_info *process;
1898
1899 process = get_thread_process (current_inferior);
1900 kill_lwp (process->lwpid, SIGINT);
1901 }
1902 else
1903 kill_lwp (signal_pid, SIGINT);
1904 }
1905
1906 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
1907 to debugger memory starting at MYADDR. */
1908
1909 static int
1910 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
1911 {
1912 char filename[PATH_MAX];
1913 int fd, n;
1914
1915 snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);
1916
1917 fd = open (filename, O_RDONLY);
1918 if (fd < 0)
1919 return -1;
1920
1921 if (offset != (CORE_ADDR) 0
1922 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
1923 n = -1;
1924 else
1925 n = read (fd, myaddr, len);
1926
1927 close (fd);
1928
1929 return n;
1930 }
1931
1932 /* These watchpoint-related wrapper functions simply pass the call on to the
1933 low target if it has registered a corresponding function. */
1934
1935 static int
1936 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
1937 {
1938 if (the_low_target.insert_watchpoint != NULL)
1939 return the_low_target.insert_watchpoint (type, addr, len);
1940 else
1941 /* Unsupported (see target.h). */
1942 return 1;
1943 }
1944
1945 static int
1946 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
1947 {
1948 if (the_low_target.remove_watchpoint != NULL)
1949 return the_low_target.remove_watchpoint (type, addr, len);
1950 else
1951 /* Unsupported (see target.h). */
1952 return 1;
1953 }
1954
1955 static int
1956 linux_stopped_by_watchpoint (void)
1957 {
1958 if (the_low_target.stopped_by_watchpoint != NULL)
1959 return the_low_target.stopped_by_watchpoint ();
1960 else
1961 return 0;
1962 }
1963
1964 static CORE_ADDR
1965 linux_stopped_data_address (void)
1966 {
1967 if (the_low_target.stopped_data_address != NULL)
1968 return the_low_target.stopped_data_address ();
1969 else
1970 return 0;
1971 }
1972
1973 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
1974 #if defined(__mcoldfire__)
1975 /* These should really be defined in the kernel's ptrace.h header. */
1976 #define PT_TEXT_ADDR 49*4
1977 #define PT_DATA_ADDR 50*4
1978 #define PT_TEXT_END_ADDR 51*4
1979 #endif
1980
1981 /* Under uClinux, programs are loaded at non-zero offsets, which we need
1982 to tell gdb about. */
1983
1984 static int
1985 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
1986 {
1987 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
1988 unsigned long text, text_end, data;
1989 int pid = get_thread_process (current_inferior)->head.id;
1990
1991 errno = 0;
1992
1993 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
1994 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
1995 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
1996
1997 if (errno == 0)
1998 {
1999 /* Both text and data offsets produced at compile-time (and so
2000 used by gdb) are relative to the beginning of the program,
2001 with the data segment immediately following the text segment.
2002 However, the actual runtime layout in memory may put the data
2003 somewhere else, so when we send gdb a data base-address, we
2004 use the real data base address and subtract the compile-time
2005 data base-address from it (which is just the length of the
2006 text segment). BSS immediately follows data in both
2007 cases. */
2008 *text_p = text;
2009 *data_p = data - (text_end - text);
2010
2011 return 1;
2012 }
2013 #endif
2014 return 0;
2015 }
2016 #endif
2017
2018 static struct target_ops linux_target_ops = {
2019 linux_create_inferior,
2020 linux_attach,
2021 linux_kill,
2022 linux_detach,
2023 linux_join,
2024 linux_thread_alive,
2025 linux_resume,
2026 linux_wait,
2027 linux_fetch_registers,
2028 linux_store_registers,
2029 linux_read_memory,
2030 linux_write_memory,
2031 linux_look_up_symbols,
2032 linux_request_interrupt,
2033 linux_read_auxv,
2034 linux_insert_watchpoint,
2035 linux_remove_watchpoint,
2036 linux_stopped_by_watchpoint,
2037 linux_stopped_data_address,
2038 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2039 linux_read_offsets,
2040 #else
2041 NULL,
2042 #endif
2043 #ifdef USE_THREAD_DB
2044 thread_db_get_tls_address,
2045 #else
2046 NULL,
2047 #endif
2048 NULL,
2049 hostio_last_error_from_errno,
2050 };
2051
2052 static void
2053 linux_init_signals ()
2054 {
2055 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
2056 to find what the cancel signal actually is. */
2057 signal (__SIGRTMIN+1, SIG_IGN);
2058 }
2059
2060 void
2061 initialize_low (void)
2062 {
2063 thread_db_active = 0;
2064 set_target_ops (&linux_target_ops);
2065 set_breakpoint_data (the_low_target.breakpoint,
2066 the_low_target.breakpoint_len);
2067 linux_init_signals ();
2068 linux_test_for_tracefork ();
2069 #ifdef HAVE_LINUX_REGSETS
2070 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
2071 ;
2072 disabled_regsets = malloc (num_regsets);
2073 #endif
2074 }