1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/dir.h>
27 #include <sys/ptrace.h>
28 #include <sys/user.h>
29 #include <signal.h>
30 #include <sys/ioctl.h>
31 #include <fcntl.h>
32 #include <string.h>
33 #include <stdlib.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <sys/syscall.h>
37 #include <sched.h>
38
39 #ifndef PTRACE_GETSIGINFO
40 # define PTRACE_GETSIGINFO 0x4202
41 # define PTRACE_SETSIGINFO 0x4203
42 #endif
43
44 #ifndef O_LARGEFILE
45 #define O_LARGEFILE 0
46 #endif
47
48 /* If the system headers did not provide the constants, hard-code the normal
49 values. */
50 #ifndef PTRACE_EVENT_FORK
51
52 #define PTRACE_SETOPTIONS 0x4200
53 #define PTRACE_GETEVENTMSG 0x4201
54
55 /* options set using PTRACE_SETOPTIONS */
56 #define PTRACE_O_TRACESYSGOOD 0x00000001
57 #define PTRACE_O_TRACEFORK 0x00000002
58 #define PTRACE_O_TRACEVFORK 0x00000004
59 #define PTRACE_O_TRACECLONE 0x00000008
60 #define PTRACE_O_TRACEEXEC 0x00000010
61 #define PTRACE_O_TRACEVFORKDONE 0x00000020
62 #define PTRACE_O_TRACEEXIT 0x00000040
63
64 /* Wait extended result codes for the above trace options. */
65 #define PTRACE_EVENT_FORK 1
66 #define PTRACE_EVENT_VFORK 2
67 #define PTRACE_EVENT_CLONE 3
68 #define PTRACE_EVENT_EXEC 4
69 #define PTRACE_EVENT_VFORK_DONE 5
70 #define PTRACE_EVENT_EXIT 6
71
72 #endif /* PTRACE_EVENT_FORK */
73
74 /* We can't always assume that this flag is available, but all systems
75 with the ptrace event handlers also have __WALL, so it's safe to use
76 in some contexts. */
77 #ifndef __WALL
78 #define __WALL 0x40000000 /* Wait for any child. */
79 #endif
80
81 #ifdef __UCLIBC__
82 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
83 #define HAS_NOMMU
84 #endif
85 #endif
86
87 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
88 representation of the thread ID.
89
90 ``all_processes'' is keyed by the process ID - which on Linux is (presently)
91 the same as the LWP ID. */
92
93 struct inferior_list all_processes;
94
95 /* A list of all unknown processes which receive stop signals. Some other
96 process will presumably claim each of these as forked children
97 momentarily. */
98
99 struct inferior_list stopped_pids;
100
101 /* FIXME this is a bit of a hack, and could be removed. */
102 int stopping_threads;
103
104 /* FIXME make into a target method? */
105 int using_threads = 1;
106 static int thread_db_active;
107
108 static int must_set_ptrace_flags;
109
110 /* This flag is true iff we've just created or attached to a new inferior
111 but it has not stopped yet. As soon as it does, we need to call the
112 low target's arch_setup callback. */
113 static int new_inferior;
114
115 static void linux_resume_one_process (struct inferior_list_entry *entry,
116 int step, int signal, siginfo_t *info);
117 static void linux_resume (struct thread_resume *resume_info);
118 static void stop_all_processes (void);
119 static int linux_wait_for_event (struct thread_info *child);
120 static int check_removed_breakpoint (struct process_info *event_child);
121 static void *add_process (unsigned long pid);
122
123 struct pending_signals
124 {
125 int signal;
126 siginfo_t info;
127 struct pending_signals *prev;
128 };
129
130 #define PTRACE_ARG3_TYPE long
131 #define PTRACE_XFER_TYPE long
132
133 #ifdef HAVE_LINUX_REGSETS
134 static int use_regsets_p = 1;
135 #endif
136
137 #define pid_of(proc) ((proc)->head.id)
138
139 /* FIXME: Delete eventually. */
140 #define inferior_pid (pid_of (get_thread_process (current_inferior)))
141
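/* Handle an extended wait status (a PTRACE_EVENT_* code in the upper
   bits of WSTAT).  Only PTRACE_EVENT_CLONE is handled here: wait for
   the new LWP's initial stop if we have not seen it yet, add it to our
   process and thread lists, and resume the thread that reported the
   event.  */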
142 static void
143 handle_extended_wait (struct process_info *event_child, int wstat)
144 {
145 int event = wstat >> 16;
146 struct process_info *new_process;
147
148 if (event == PTRACE_EVENT_CLONE)
149 {
150 unsigned long new_pid;
151 int ret, status;
152
153 ptrace (PTRACE_GETEVENTMSG, inferior_pid, 0, &new_pid);
154
155 /* If we haven't already seen the new PID stop, wait for it now. */
156 if (! pull_pid_from_list (&stopped_pids, new_pid))
157 {
158 /* The new child has a pending SIGSTOP. We can't affect it until it
159 hits the SIGSTOP, but we're already attached. */
160
161 do {
162 ret = waitpid (new_pid, &status, __WALL);
163 } while (ret == -1 && errno == EINTR);
164
165 if (ret == -1)
166 perror_with_name ("waiting for new child");
167 else if (ret != new_pid)
168 warning ("wait returned unexpected PID %d", ret);
169 else if (!WIFSTOPPED (status))
170 warning ("wait returned unexpected status 0x%x", status);
171 }
172
173 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
174
175 new_process = (struct process_info *) add_process (new_pid);
176 add_thread (new_pid, new_process, new_pid);
177 new_thread_notify (thread_id_to_gdb_id (new_process->lwpid));
178
179 /* Normally we will get the pending SIGSTOP. But in some cases
180 we might get another signal delivered to the group first.
181 If we do, be sure not to lose it. */
182 if (WSTOPSIG (status) == SIGSTOP)
183 {
184 if (stopping_threads)
185 new_process->stopped = 1;
186 else
187 ptrace (PTRACE_CONT, new_pid, 0, 0);
188 }
189 else
190 {
191 new_process->stop_expected = 1;
192 if (stopping_threads)
193 {
194 new_process->stopped = 1;
195 new_process->status_pending_p = 1;
196 new_process->status_pending = status;
197 }
198 else
199 /* Pass the signal on. This is what GDB does - except
200 shouldn't we really report it instead? */
201 ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
202 }
203
204 /* Always resume the current thread. If we are stopping
205 threads, it will have a pending SIGSTOP; we may as well
206 collect it now. */
207 linux_resume_one_process (&event_child->head,
208 event_child->stepping, 0, NULL);
209 }
210 }
211
212 /* This function should only be called if the process got a SIGTRAP.
213 The SIGTRAP could mean several things.
214
215 On i386, where decr_pc_after_break is non-zero:
216 If we were single-stepping this process using PTRACE_SINGLESTEP,
217 we will get only the one SIGTRAP (even if the instruction we
218 stepped over was a breakpoint). The value of $eip will be the
219 next instruction.
220 If we continue the process using PTRACE_CONT, we will get a
221 SIGTRAP when we hit a breakpoint. The value of $eip will be
222 the instruction after the breakpoint (i.e. needs to be
223 decremented). If we report the SIGTRAP to GDB, we must also
224 report the undecremented PC. If we cancel the SIGTRAP, we
225 must resume at the decremented PC.
226
227 (Presumably, not yet tested) On a non-decr_pc_after_break machine
228 with hardware or kernel single-step:
229 If we single-step over a breakpoint instruction, our PC will
230 point at the following instruction. If we continue and hit a
231 breakpoint instruction, our PC will point at the breakpoint
232 instruction. */
233
234 static CORE_ADDR
235 get_stop_pc (void)
236 {
237 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
238
239 if (get_thread_process (current_inferior)->stepping)
240 return stop_pc;
241 else
242 return stop_pc - the_low_target.decr_pc_after_break;
243 }
244
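/* Allocate a zeroed process_info for PID, add it to all_processes,
   and return it.  */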
245 static void *
246 add_process (unsigned long pid)
247 {
248 struct process_info *process;
249
250 process = (struct process_info *) malloc (sizeof (*process));
251 memset (process, 0, sizeof (*process));
252
253 process->head.id = pid;
254 process->lwpid = pid;
255
256 add_inferior_to_list (&all_processes, &process->head);
257
258 return process;
259 }
260
261 /* Start an inferior process and return its pid.
262 ALLARGS is a vector of program-name and args. */
263
264 static int
265 linux_create_inferior (char *program, char **allargs)
266 {
267 void *new_process;
268 int pid;
269
270 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
271 pid = vfork ();
272 #else
273 pid = fork ();
274 #endif
275 if (pid < 0)
276 perror_with_name ("fork");
277
278 if (pid == 0)
279 {
280 ptrace (PTRACE_TRACEME, 0, 0, 0);
281
282 signal (__SIGRTMIN + 1, SIG_DFL);
283
284 setpgid (0, 0);
285
286 execv (program, allargs);
287 if (errno == ENOENT)
288 execvp (program, allargs);
289
290 fprintf (stderr, "Cannot exec %s: %s.\n", program,
291 strerror (errno));
292 fflush (stderr);
293 _exit (0177);
294 }
295
296 new_process = add_process (pid);
297 add_thread (pid, new_process, pid);
298 must_set_ptrace_flags = 1;
299 new_inferior = 1;
300
301 return pid;
302 }
303
304 /* Attach to an inferior process. */
305
306 void
307 linux_attach_lwp (unsigned long pid)
308 {
309 struct process_info *new_process;
310
311 if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
312 {
313 if (all_threads.head != NULL)
314 {
315 /* If we fail to attach to an LWP, just warn. */
316 fprintf (stderr, "Cannot attach to process %ld: %s (%d)\n", pid,
317 strerror (errno), errno);
318 fflush (stderr);
319 return;
320 }
321 else
322 /* If we fail to attach to a process, report an error. */
323 error ("Cannot attach to process %ld: %s (%d)\n", pid,
324 strerror (errno), errno);
325 }
326
327 ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACECLONE);
328
329 new_process = (struct process_info *) add_process (pid);
330 add_thread (pid, new_process, pid);
331 new_thread_notify (thread_id_to_gdb_id (new_process->lwpid));
332
333 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
334 brings it to a halt. We should ignore that SIGSTOP and resume the process
335 (unless this is the first process, in which case the flag will be cleared
336 in linux_attach).
337
338 On the other hand, if we are currently trying to stop all threads, we
339 should treat the new thread as if we had sent it a SIGSTOP. This works
340 because we are guaranteed that add_process added us to the end of the
341 list, and so the new thread has not yet reached wait_for_sigstop (but
342 will). */
343 if (! stopping_threads)
344 new_process->stop_expected = 1;
345 }
346
347 int
348 linux_attach (unsigned long pid)
349 {
350 struct process_info *process;
351
352 linux_attach_lwp (pid);
353
354 /* Don't ignore the initial SIGSTOP if we just attached to this process.
355 It will be collected by wait shortly. */
356 process = (struct process_info *) find_inferior_id (&all_processes, pid);
357 process->stop_expected = 0;
358
359 new_inferior = 1;
360
361 return 0;
362 }
363
364 /* Kill the inferior process. Make us have no inferior. */
365
366 static void
367 linux_kill_one_process (struct inferior_list_entry *entry)
368 {
369 struct thread_info *thread = (struct thread_info *) entry;
370 struct process_info *process = get_thread_process (thread);
371 int wstat;
372
373 /* We avoid killing the first thread here, because of a Linux kernel (at
374 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
375 the children get a chance to be reaped, it will remain a zombie
376 forever. */
377 if (entry == all_threads.head)
378 return;
379
380 do
381 {
382 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
383
384 /* Make sure it died. The loop is most likely unnecessary. */
385 wstat = linux_wait_for_event (thread);
386 } while (WIFSTOPPED (wstat));
387 }
388
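/* Kill every thread of the inferior and discard our process and
   thread lists.  The first thread is killed last, to work around the
   kernel bug described in linux_kill_one_process.  */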
389 static void
390 linux_kill (void)
391 {
392 struct thread_info *thread = (struct thread_info *) all_threads.head;
393 struct process_info *process;
394 int wstat;
395
396 if (thread == NULL)
397 return;
398
399 for_each_inferior (&all_threads, linux_kill_one_process);
400
401 /* See the comment in linux_kill_one_process. We did not kill the first
402 thread in the list, so do so now. */
403 process = get_thread_process (thread);
404 do
405 {
406 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
407
408 /* Make sure it died. The loop is most likely unnecessary. */
409 wstat = linux_wait_for_event (thread);
410 } while (WIFSTOPPED (wstat));
411
412 clear_inferiors ();
413 free (all_processes.head);
414 all_processes.head = all_processes.tail = NULL;
415 }
416
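/* Detach from a single LWP: collect any SIGSTOP we still expect for
   it, flush its cached registers, and release it with PTRACE_DETACH
   so that it resumes execution.  */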
417 static void
418 linux_detach_one_process (struct inferior_list_entry *entry)
419 {
420 struct thread_info *thread = (struct thread_info *) entry;
421 struct process_info *process = get_thread_process (thread);
422
423 /* Make sure the process isn't stopped at a breakpoint that's
424 no longer there. */
425 check_removed_breakpoint (process);
426
427 /* If this process is stopped but is expecting a SIGSTOP, then make
428 sure we take care of that now. This isn't absolutely guaranteed
429 to collect the SIGSTOP, but is fairly likely to. */
430 if (process->stop_expected)
431 {
432 /* Clear stop_expected, so that the SIGSTOP will be reported. */
433 process->stop_expected = 0;
434 if (process->stopped)
435 linux_resume_one_process (&process->head, 0, 0, NULL);
436 linux_wait_for_event (thread);
437 }
438
439 /* Flush any pending changes to the process's registers. */
440 regcache_invalidate_one ((struct inferior_list_entry *)
441 get_process_thread (process));
442
443 /* Finally, let it resume. */
444 ptrace (PTRACE_DETACH, pid_of (process), 0, 0);
445 }
446
447 static int
448 linux_detach (void)
449 {
450 delete_all_breakpoints ();
451 for_each_inferior (&all_threads, linux_detach_one_process);
452 clear_inferiors ();
453 free (all_processes.head);
454 all_processes.head = all_processes.tail = NULL;
455 return 0;
456 }
457
458 static void
459 linux_join (void)
460 {
461 extern unsigned long signal_pid;
462 int status, ret;
463
464 do {
465 ret = waitpid (signal_pid, &status, 0);
466 if (WIFEXITED (status) || WIFSIGNALED (status))
467 break;
468 } while (ret != -1 || errno != ECHILD);
469 }
470
471 /* Return nonzero if the given thread is still alive. */
472 static int
473 linux_thread_alive (unsigned long lwpid)
474 {
475 if (find_inferior_id (&all_threads, lwpid) != NULL)
476 return 1;
477 else
478 return 0;
479 }
480
481 /* Return nonzero if this process stopped at a breakpoint which
482 no longer appears to be inserted. Also adjust the PC
483 appropriately to resume where the breakpoint used to be. */
484 static int
485 check_removed_breakpoint (struct process_info *event_child)
486 {
487 CORE_ADDR stop_pc;
488 struct thread_info *saved_inferior;
489
490 if (event_child->pending_is_breakpoint == 0)
491 return 0;
492
493 if (debug_threads)
494 fprintf (stderr, "Checking for breakpoint in process %ld.\n",
495 event_child->lwpid);
496
497 saved_inferior = current_inferior;
498 current_inferior = get_process_thread (event_child);
499
500 stop_pc = get_stop_pc ();
501
502 /* If the PC has changed since we stopped, then we shouldn't do
503 anything. This happens if, for instance, GDB handled the
504 decr_pc_after_break subtraction itself. */
505 if (stop_pc != event_child->pending_stop_pc)
506 {
507 if (debug_threads)
508 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
509 event_child->pending_stop_pc);
510
511 event_child->pending_is_breakpoint = 0;
512 current_inferior = saved_inferior;
513 return 0;
514 }
515
516 /* If the breakpoint is still there, we will report hitting it. */
517 if ((*the_low_target.breakpoint_at) (stop_pc))
518 {
519 if (debug_threads)
520 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
521 current_inferior = saved_inferior;
522 return 0;
523 }
524
525 if (debug_threads)
526 fprintf (stderr, "Removed breakpoint.\n");
527
528 /* For decr_pc_after_break targets, here is where we perform the
529 decrement. We go immediately from this function to resuming,
530 and can not safely call get_stop_pc () again. */
531 if (the_low_target.set_pc != NULL)
532 (*the_low_target.set_pc) (stop_pc);
533
534 /* We consumed the pending SIGTRAP. */
535 event_child->pending_is_breakpoint = 0;
536 event_child->status_pending_p = 0;
537 event_child->status_pending = 0;
538
539 current_inferior = saved_inferior;
540 return 1;
541 }
542
543 /* Return 1 if this process has an interesting status pending. This function
544 may silently resume an inferior process. */
545 static int
546 status_pending_p (struct inferior_list_entry *entry, void *dummy)
547 {
548 struct process_info *process = (struct process_info *) entry;
549
550 if (process->status_pending_p)
551 if (check_removed_breakpoint (process))
552 {
553 /* This thread was stopped at a breakpoint, and the breakpoint
554 is now gone. We were told to continue (or step...) all threads,
555 so GDB isn't trying to single-step past this breakpoint.
556 So instead of reporting the old SIGTRAP, pretend we got to
557 the breakpoint just after it was removed instead of just
558 before; resume the process. */
559 linux_resume_one_process (&process->head, 0, 0, NULL);
560 return 0;
561 }
562
563 return process->status_pending_p;
564 }
565
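/* Wait for an event from *CHILDP, or from any LWP if *CHILDP is NULL,
   polling with WNOHANG for both normal and __WCLONE children.  On
   return *CHILDP points at the process that reported the event and
   *WSTATP holds its wait status.  Stops reported by LWPs we do not
   know about yet are recorded in stopped_pids.  */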
566 static void
567 linux_wait_for_process (struct process_info **childp, int *wstatp)
568 {
569 int ret;
570 int to_wait_for = -1;
571
572 if (*childp != NULL)
573 to_wait_for = (*childp)->lwpid;
574
575 retry:
576 while (1)
577 {
578 ret = waitpid (to_wait_for, wstatp, WNOHANG);
579
580 if (ret == -1)
581 {
582 if (errno != ECHILD)
583 perror_with_name ("waitpid");
584 }
585 else if (ret > 0)
586 break;
587
588 ret = waitpid (to_wait_for, wstatp, WNOHANG | __WCLONE);
589
590 if (ret == -1)
591 {
592 if (errno != ECHILD)
593 perror_with_name ("waitpid (WCLONE)");
594 }
595 else if (ret > 0)
596 break;
597
598 usleep (1000);
599 }
600
601 if (debug_threads
602 && (!WIFSTOPPED (*wstatp)
603 || (WSTOPSIG (*wstatp) != 32
604 && WSTOPSIG (*wstatp) != 33)))
605 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
606
607 if (to_wait_for == -1)
608 *childp = (struct process_info *) find_inferior_id (&all_processes, ret);
609
610 /* If we didn't find a process, one of two things presumably happened:
611 - A process we started and then detached from has exited. Ignore it.
612 - A process we are controlling has forked and the new child's stop
613 was reported to us by the kernel. Save its PID. */
614 if (*childp == NULL && WIFSTOPPED (*wstatp))
615 {
616 add_pid_to_list (&stopped_pids, ret);
617 goto retry;
618 }
619 else if (*childp == NULL)
620 goto retry;
621
622 (*childp)->stopped = 1;
623 (*childp)->pending_is_breakpoint = 0;
624
625 (*childp)->last_status = *wstatp;
626
627 /* Architecture-specific setup after inferior is running.
628 This needs to happen after we have attached to the inferior
629 and it is stopped for the first time, but before we access
630 any inferior registers. */
631 if (new_inferior)
632 {
633 the_low_target.arch_setup ();
634 new_inferior = 0;
635 }
636
637 if (debug_threads
638 && WIFSTOPPED (*wstatp))
639 {
640 current_inferior = (struct thread_info *)
641 find_inferior_id (&all_threads, (*childp)->lwpid);
642 /* For testing only; i386_stop_pc prints out a diagnostic. */
643 if (the_low_target.get_pc != NULL)
644 get_stop_pc ();
645 }
646 }
647
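/* Wait for something worth reporting to GDB from CHILD, or from any
   LWP if CHILD is NULL.  Pending statuses, expected SIGSTOPs,
   extended clone events, ignored signals and gdbserver-internal
   breakpoints are handled here without involving GDB; the wait status
   that should be reported is returned.  */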
648 static int
649 linux_wait_for_event (struct thread_info *child)
650 {
651 CORE_ADDR stop_pc;
652 struct process_info *event_child;
653 int wstat;
654 int bp_status;
655
656 /* Check for a process with a pending status. */
657 /* It is possible that the user changed the pending task's registers since
658 it stopped. We correctly handle the change of PC if we hit a breakpoint
659 (in check_removed_breakpoint); signals should be reported anyway. */
660 if (child == NULL)
661 {
662 event_child = (struct process_info *)
663 find_inferior (&all_processes, status_pending_p, NULL);
664 if (debug_threads && event_child)
665 fprintf (stderr, "Got a pending child %ld\n", event_child->lwpid);
666 }
667 else
668 {
669 event_child = get_thread_process (child);
670 if (event_child->status_pending_p
671 && check_removed_breakpoint (event_child))
672 event_child = NULL;
673 }
674
675 if (event_child != NULL)
676 {
677 if (event_child->status_pending_p)
678 {
679 if (debug_threads)
680 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
681 event_child->lwpid, event_child->status_pending);
682 wstat = event_child->status_pending;
683 event_child->status_pending_p = 0;
684 event_child->status_pending = 0;
685 current_inferior = get_process_thread (event_child);
686 return wstat;
687 }
688 }
689
690 /* We only enter this loop if no process has a pending wait status. Thus
691 any action taken in response to a wait status inside this loop is
692 responding as soon as we detect the status, not after any pending
693 events. */
694 while (1)
695 {
696 if (child == NULL)
697 event_child = NULL;
698 else
699 event_child = get_thread_process (child);
700
701 linux_wait_for_process (&event_child, &wstat);
702
703 if (event_child == NULL)
704 error ("event from unknown child");
705
706 current_inferior = (struct thread_info *)
707 find_inferior_id (&all_threads, event_child->lwpid);
708
709 /* Check for thread exit. */
710 if (! WIFSTOPPED (wstat))
711 {
712 if (debug_threads)
713 fprintf (stderr, "LWP %ld exiting\n", event_child->head.id);
714
715 /* If the last thread is exiting, just return. */
716 if (all_threads.head == all_threads.tail)
717 return wstat;
718
719 dead_thread_notify (thread_id_to_gdb_id (event_child->lwpid));
720
721 remove_inferior (&all_processes, &event_child->head);
722 free (event_child);
723 remove_thread (current_inferior);
724 current_inferior = (struct thread_info *) all_threads.head;
725
726 /* If we were waiting for this particular child to do something...
727 well, it did something. */
728 if (child != NULL)
729 return wstat;
730
731 /* Wait for a more interesting event. */
732 continue;
733 }
734
735 if (WIFSTOPPED (wstat)
736 && WSTOPSIG (wstat) == SIGSTOP
737 && event_child->stop_expected)
738 {
739 if (debug_threads)
740 fprintf (stderr, "Expected stop.\n");
741 event_child->stop_expected = 0;
742 linux_resume_one_process (&event_child->head,
743 event_child->stepping, 0, NULL);
744 continue;
745 }
746
747 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
748 && wstat >> 16 != 0)
749 {
750 handle_extended_wait (event_child, wstat);
751 continue;
752 }
753
754 /* If GDB is not interested in this signal, don't stop other
755 threads, and don't report it to GDB. Just resume the
756 inferior right away. We do this for threading-related
757 signals as well as any that GDB specifically requested we
758 ignore. But never ignore SIGSTOP if we sent it ourselves,
759 and do not ignore signals when stepping - they may require
760 special handling to skip the signal handler. */
761 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
762 thread library? */
763 if (WIFSTOPPED (wstat)
764 && !event_child->stepping
765 && (
766 #ifdef USE_THREAD_DB
767 (thread_db_active && (WSTOPSIG (wstat) == __SIGRTMIN
768 || WSTOPSIG (wstat) == __SIGRTMIN + 1))
769 ||
770 #endif
771 (pass_signals[target_signal_from_host (WSTOPSIG (wstat))]
772 && (WSTOPSIG (wstat) != SIGSTOP || !stopping_threads))))
773 {
774 siginfo_t info, *info_p;
775
776 if (debug_threads)
777 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
778 WSTOPSIG (wstat), event_child->head.id);
779
780 if (ptrace (PTRACE_GETSIGINFO, event_child->lwpid, 0, &info) == 0)
781 info_p = &info;
782 else
783 info_p = NULL;
784 linux_resume_one_process (&event_child->head,
785 event_child->stepping,
786 WSTOPSIG (wstat), info_p);
787 continue;
788 }
789
790 /* If this event was not handled above, and is not a SIGTRAP, report
791 it. */
792 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGTRAP)
793 return wstat;
794
795 /* If this target does not support breakpoints, we simply report the
796 SIGTRAP; it's of no concern to us. */
797 if (the_low_target.get_pc == NULL)
798 return wstat;
799
800 stop_pc = get_stop_pc ();
801
802 /* bp_reinsert will only be set if we were single-stepping.
803 Notice that we will resume the process after hitting
804 a gdbserver breakpoint; single-stepping to/over one
805 is not supported (yet). */
806 if (event_child->bp_reinsert != 0)
807 {
808 if (debug_threads)
809 fprintf (stderr, "Reinserted breakpoint.\n");
810 reinsert_breakpoint (event_child->bp_reinsert);
811 event_child->bp_reinsert = 0;
812
813 /* Clear the single-stepping flag and SIGTRAP as we resume. */
814 linux_resume_one_process (&event_child->head, 0, 0, NULL);
815 continue;
816 }
817
818 bp_status = check_breakpoints (stop_pc);
819
820 if (bp_status != 0)
821 {
822 if (debug_threads)
823 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
824
825 /* We hit one of our own breakpoints. We mark it as a pending
826 breakpoint, so that check_removed_breakpoint () will do the PC
827 adjustment for us at the appropriate time. */
828 event_child->pending_is_breakpoint = 1;
829 event_child->pending_stop_pc = stop_pc;
830
831 /* We may need to put the breakpoint back. We continue in the event
832 loop instead of simply replacing the breakpoint right away,
833 in order to not lose signals sent to the thread that hit the
834 breakpoint. Unfortunately this increases the window where another
835 thread could sneak past the removed breakpoint. For the current
836 use of server-side breakpoints (thread creation) this is
837 acceptable; but it needs to be considered before this breakpoint
838 mechanism can be used in more general ways. For some breakpoints
839 it may be necessary to stop all other threads, but that should
840 be avoided where possible.
841
842 If breakpoint_reinsert_addr is NULL, that means that we can
843 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
844 mark it for reinsertion, and single-step.
845
846 Otherwise, call the target function to figure out where we need
847 our temporary breakpoint, create it, and continue executing this
848 process. */
849 if (bp_status == 2)
850 /* No need to reinsert. */
851 linux_resume_one_process (&event_child->head, 0, 0, NULL);
852 else if (the_low_target.breakpoint_reinsert_addr == NULL)
853 {
854 event_child->bp_reinsert = stop_pc;
855 uninsert_breakpoint (stop_pc);
856 linux_resume_one_process (&event_child->head, 1, 0, NULL);
857 }
858 else
859 {
860 reinsert_breakpoint_by_bp
861 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
862 linux_resume_one_process (&event_child->head, 0, 0, NULL);
863 }
864
865 continue;
866 }
867
868 if (debug_threads)
869 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
870
871 /* If we were single-stepping, we definitely want to report the
872 SIGTRAP. The single-step operation has completed, so also
873 clear the stepping flag; in general this does not matter,
874 because the SIGTRAP will be reported to the client, which
875 will give us a new action for this thread, but clear it for
876 consistency anyway. It's safe to clear the stepping flag
877 because the only consumer of get_stop_pc () after this point
878 is check_removed_breakpoint, and pending_is_breakpoint is not
879 set. It might be wiser to use a step_completed flag instead. */
880 if (event_child->stepping)
881 {
882 event_child->stepping = 0;
883 return wstat;
884 }
885
886 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
887 Check if it is a breakpoint, and if so mark the process information
888 accordingly. This will handle both the necessary fiddling with the
889 PC on decr_pc_after_break targets and suppressing extra threads
890 hitting a breakpoint if two hit it at once and then GDB removes it
891 after the first is reported. Arguably it would be better to report
892 multiple threads hitting breakpoints simultaneously, but the current
893 remote protocol does not allow this. */
894 if ((*the_low_target.breakpoint_at) (stop_pc))
895 {
896 event_child->pending_is_breakpoint = 1;
897 event_child->pending_stop_pc = stop_pc;
898 }
899
900 return wstat;
901 }
902
903 /* NOTREACHED */
904 return 0;
905 }
906
907 /* Wait for process, returns status. */
908
909 static unsigned char
910 linux_wait (char *status)
911 {
912 int w;
913 struct thread_info *child = NULL;
914
915 retry:
916 /* If we were only supposed to resume one thread, only wait for
917 that thread - if it's still alive. If it died, however - which
918 can happen if we're coming from the thread death case below -
919 then we need to make sure we restart the other threads. We could
920 pick a thread at random or restart all; restarting all is less
921 arbitrary. */
922 if (cont_thread != 0 && cont_thread != -1)
923 {
924 child = (struct thread_info *) find_inferior_id (&all_threads,
925 cont_thread);
926
927 /* No stepping, no signal - unless one is pending already, of course. */
928 if (child == NULL)
929 {
930 struct thread_resume resume_info;
931 resume_info.thread = -1;
932 resume_info.step = resume_info.sig = resume_info.leave_stopped = 0;
933 linux_resume (&resume_info);
934 }
935 }
936
937 w = linux_wait_for_event (child);
938 stop_all_processes ();
939
940 if (must_set_ptrace_flags)
941 {
942 ptrace (PTRACE_SETOPTIONS, inferior_pid, 0, PTRACE_O_TRACECLONE);
943 must_set_ptrace_flags = 0;
944 }
945
946 /* If we are waiting for a particular child, and it exited,
947 linux_wait_for_event will return its exit status. Similarly if
948 the last child exited. If this is not the last child, however,
949 do not report it as exited until there is a 'thread exited' response
950 available in the remote protocol. Instead, just wait for another event.
951 This should be safe, because if the thread crashed we will already
952 have reported the termination signal to GDB; that should stop any
953 in-progress stepping operations, etc.
954
955 Report the exit status of the last thread to exit. This matches
956 LinuxThreads' behavior. */
957
958 if (all_threads.head == all_threads.tail)
959 {
960 if (WIFEXITED (w))
961 {
962 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
963 *status = 'W';
964 clear_inferiors ();
965 free (all_processes.head);
966 all_processes.head = all_processes.tail = NULL;
967 return WEXITSTATUS (w);
968 }
969 else if (!WIFSTOPPED (w))
970 {
971 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
972 *status = 'X';
973 clear_inferiors ();
974 free (all_processes.head);
975 all_processes.head = all_processes.tail = NULL;
976 return target_signal_from_host (WTERMSIG (w));
977 }
978 }
979 else
980 {
981 if (!WIFSTOPPED (w))
982 goto retry;
983 }
984
985 *status = 'T';
986 return target_signal_from_host (WSTOPSIG (w));
987 }
988
989 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
990 thread groups are in use, we need to use tkill. */
991
992 static int
993 kill_lwp (unsigned long lwpid, int signo)
994 {
995 static int tkill_failed;
996
997 errno = 0;
998
999 #ifdef SYS_tkill
1000 if (!tkill_failed)
1001 {
1002 int ret = syscall (SYS_tkill, lwpid, signo);
1003 if (errno != ENOSYS)
1004 return ret;
1005 errno = 0;
1006 tkill_failed = 1;
1007 }
1008 #endif
1009
1010 return kill (lwpid, signo);
1011 }
1012
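/* Send SIGSTOP to an LWP that is not already stopped.  If a SIGSTOP
   is already expected for it, just clear the flag so that
   wait_for_sigstop will report the stop instead of swallowing it.  */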
1013 static void
1014 send_sigstop (struct inferior_list_entry *entry)
1015 {
1016 struct process_info *process = (struct process_info *) entry;
1017
1018 if (process->stopped)
1019 return;
1020
1021 /* If we already have a pending stop signal for this process, don't
1022 send another. */
1023 if (process->stop_expected)
1024 {
1025 if (debug_threads)
1026 fprintf (stderr, "Have pending sigstop for process %ld\n",
1027 process->lwpid);
1028
1029 /* We clear the stop_expected flag so that wait_for_sigstop
1030 will receive the SIGSTOP event (instead of silently resuming and
1031 waiting again). It'll be reset below. */
1032 process->stop_expected = 0;
1033 return;
1034 }
1035
1036 if (debug_threads)
1037 fprintf (stderr, "Sending sigstop to process %ld\n", process->head.id);
1038
1039 kill_lwp (process->head.id, SIGSTOP);
1040 }
1041
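/* Wait for an LWP to report its pending SIGSTOP.  If it stops with
   some other signal instead, save that status to report later and
   keep expecting the SIGSTOP.  */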
1042 static void
1043 wait_for_sigstop (struct inferior_list_entry *entry)
1044 {
1045 struct process_info *process = (struct process_info *) entry;
1046 struct thread_info *saved_inferior, *thread;
1047 int wstat;
1048 unsigned long saved_tid;
1049
1050 if (process->stopped)
1051 return;
1052
1053 saved_inferior = current_inferior;
1054 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1055 thread = (struct thread_info *) find_inferior_id (&all_threads,
1056 process->lwpid);
1057 wstat = linux_wait_for_event (thread);
1058
1059 /* If we stopped with a non-SIGSTOP signal, save it for later
1060 and record the pending SIGSTOP. If the process exited, just
1061 return. */
1062 if (WIFSTOPPED (wstat)
1063 && WSTOPSIG (wstat) != SIGSTOP)
1064 {
1065 if (debug_threads)
1066 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1067 process->lwpid, wstat);
1068 process->status_pending_p = 1;
1069 process->status_pending = wstat;
1070 process->stop_expected = 1;
1071 }
1072
1073 if (linux_thread_alive (saved_tid))
1074 current_inferior = saved_inferior;
1075 else
1076 {
1077 if (debug_threads)
1078 fprintf (stderr, "Previously current thread died.\n");
1079
1080 /* Set a valid thread as current. */
1081 set_desired_inferior (0);
1082 }
1083 }
1084
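/* Stop all LWPs: send each one a SIGSTOP and wait until every one of
   them has reported a stop.  */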
1085 static void
1086 stop_all_processes (void)
1087 {
1088 stopping_threads = 1;
1089 for_each_inferior (&all_processes, send_sigstop);
1090 for_each_inferior (&all_processes, wait_for_sigstop);
1091 stopping_threads = 0;
1092 }
1093
1094 /* Resume execution of the inferior process.
1095 If STEP is nonzero, single-step it.
1096 If SIGNAL is nonzero, give it that signal. */
1097
1098 static void
1099 linux_resume_one_process (struct inferior_list_entry *entry,
1100 int step, int signal, siginfo_t *info)
1101 {
1102 struct process_info *process = (struct process_info *) entry;
1103 struct thread_info *saved_inferior;
1104
1105 if (process->stopped == 0)
1106 return;
1107
1108 /* If we have pending signals or status, and a new signal, enqueue the
1109 signal. Also enqueue the signal if we are waiting to reinsert a
1110 breakpoint; it will be picked up again below. */
1111 if (signal != 0
1112 && (process->status_pending_p || process->pending_signals != NULL
1113 || process->bp_reinsert != 0))
1114 {
1115 struct pending_signals *p_sig;
1116 p_sig = malloc (sizeof (*p_sig));
1117 p_sig->prev = process->pending_signals;
1118 p_sig->signal = signal;
1119 if (info == NULL)
1120 memset (&p_sig->info, 0, sizeof (siginfo_t));
1121 else
1122 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1123 process->pending_signals = p_sig;
1124 }
1125
1126 if (process->status_pending_p && !check_removed_breakpoint (process))
1127 return;
1128
1129 saved_inferior = current_inferior;
1130 current_inferior = get_process_thread (process);
1131
1132 if (debug_threads)
1133 fprintf (stderr, "Resuming process %ld (%s, signal %d, stop %s)\n", inferior_pid,
1134 step ? "step" : "continue", signal,
1135 process->stop_expected ? "expected" : "not expected");
1136
1137 /* This bit needs some thinking about. If we get a signal that
1138 we must report while a single-step reinsert is still pending,
1139 we often end up resuming the thread. It might be better to
1140 (ew) allow a stack of pending events; then we could be sure that
1141 the reinsert happened right away and not lose any signals.
1142
1143 Making this stack would also shrink the window in which breakpoints are
1144 uninserted (see comment in linux_wait_for_process) but not enough for
1145 complete correctness, so it won't solve that problem. It may be
1146 worthwhile just to solve this one, however. */
1147 if (process->bp_reinsert != 0)
1148 {
1149 if (debug_threads)
1150 fprintf (stderr, " pending reinsert at %08lx", (long)process->bp_reinsert);
1151 if (step == 0)
1152 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1153 step = 1;
1154
1155 /* Postpone any pending signal. It was enqueued above. */
1156 signal = 0;
1157 }
1158
1159 check_removed_breakpoint (process);
1160
1161 if (debug_threads && the_low_target.get_pc != NULL)
1162 {
1163 fprintf (stderr, " ");
1164 (*the_low_target.get_pc) ();
1165 }
1166
1167 /* If we have pending signals, consume one unless we are trying to reinsert
1168 a breakpoint. */
1169 if (process->pending_signals != NULL && process->bp_reinsert == 0)
1170 {
1171 struct pending_signals **p_sig;
1172
1173 p_sig = &process->pending_signals;
1174 while ((*p_sig)->prev != NULL)
1175 p_sig = &(*p_sig)->prev;
1176
1177 signal = (*p_sig)->signal;
1178 if ((*p_sig)->info.si_signo != 0)
1179 ptrace (PTRACE_SETSIGINFO, process->lwpid, 0, &(*p_sig)->info);
1180
1181 free (*p_sig);
1182 *p_sig = NULL;
1183 }
1184
1185 regcache_invalidate_one ((struct inferior_list_entry *)
1186 get_process_thread (process));
1187 errno = 0;
1188 process->stopped = 0;
1189 process->stepping = step;
1190 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, process->lwpid, 0, signal);
1191
1192 current_inferior = saved_inferior;
1193 if (errno)
1194 perror_with_name ("ptrace");
1195 }
1196
1197 static struct thread_resume *resume_ptr;
1198
1199 /* This function is called once per thread. We look up the thread
1200 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1201 resume request.
1202
1203 This algorithm is O(threads * resume elements), but resume elements
1204 is small (and will remain small at least until GDB supports thread
1205 suspension). */
1206 static void
1207 linux_set_resume_request (struct inferior_list_entry *entry)
1208 {
1209 struct process_info *process;
1210 struct thread_info *thread;
1211 int ndx;
1212
1213 thread = (struct thread_info *) entry;
1214 process = get_thread_process (thread);
1215
1216 ndx = 0;
1217 while (resume_ptr[ndx].thread != -1 && resume_ptr[ndx].thread != entry->id)
1218 ndx++;
1219
1220 process->resume = &resume_ptr[ndx];
1221 }
1222
1223 /* This function is called once per thread. We check the thread's resume
1224 request, which will tell us whether to resume, step, or leave the thread
1225 stopped; and what signal, if any, it should be sent. For threads which
1226 we aren't explicitly told otherwise, we preserve the stepping flag; this
1227 is used for stepping over gdbserver-placed breakpoints. */
1228
1229 static void
1230 linux_continue_one_thread (struct inferior_list_entry *entry)
1231 {
1232 struct process_info *process;
1233 struct thread_info *thread;
1234 int step;
1235
1236 thread = (struct thread_info *) entry;
1237 process = get_thread_process (thread);
1238
1239 if (process->resume->leave_stopped)
1240 return;
1241
1242 if (process->resume->thread == -1)
1243 step = process->stepping || process->resume->step;
1244 else
1245 step = process->resume->step;
1246
1247 linux_resume_one_process (&process->head, step, process->resume->sig, NULL);
1248
1249 process->resume = NULL;
1250 }
1251
1252 /* This function is called once per thread. We check the thread's resume
1253 request, which will tell us whether to resume, step, or leave the thread
1254 stopped; and what signal, if any, it should be sent. We queue any needed
1255 signals, since we won't actually resume. We already have a pending event
1256 to report, so we don't need to preserve any step requests; they should
1257 be re-issued if necessary. */
1258
1259 static void
1260 linux_queue_one_thread (struct inferior_list_entry *entry)
1261 {
1262 struct process_info *process;
1263 struct thread_info *thread;
1264
1265 thread = (struct thread_info *) entry;
1266 process = get_thread_process (thread);
1267
1268 if (process->resume->leave_stopped)
1269 return;
1270
1271 /* If we have a new signal, enqueue the signal. */
1272 if (process->resume->sig != 0)
1273 {
1274 struct pending_signals *p_sig;
1275 p_sig = malloc (sizeof (*p_sig));
1276 p_sig->prev = process->pending_signals;
1277 p_sig->signal = process->resume->sig;
1278 memset (&p_sig->info, 0, sizeof (siginfo_t));
1279
1280 /* If this is the same signal we were previously stopped by,
1281 make sure to queue its siginfo. We can ignore the return
1282 value of ptrace; if it fails, we'll skip
1283 PTRACE_SETSIGINFO. */
1284 if (WIFSTOPPED (process->last_status)
1285 && WSTOPSIG (process->last_status) == process->resume->sig)
1286 ptrace (PTRACE_GETSIGINFO, process->lwpid, 0, &p_sig->info);
1287
1288 process->pending_signals = p_sig;
1289 }
1290
1291 process->resume = NULL;
1292 }
1293
1294 /* Set DUMMY if this process has an interesting status pending. */
1295 static int
1296 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1297 {
1298 struct process_info *process = (struct process_info *) entry;
1299
1300 /* Processes which will not be resumed are not interesting, because
1301 we might not wait for them next time through linux_wait. */
1302 if (process->resume->leave_stopped)
1303 return 0;
1304
1305 /* If this thread has a removed breakpoint, we won't have any
1306 events to report later, so check now. check_removed_breakpoint
1307 may clear status_pending_p. We avoid calling check_removed_breakpoint
1308 for any thread that we are not otherwise going to resume - this
1309 lets us preserve stopped status when two threads hit a breakpoint.
1310 GDB removes the breakpoint to single-step a particular thread
1311 past it, then re-inserts it and resumes all threads. We want
1312 to report the second thread without resuming it in the interim. */
1313 if (process->status_pending_p)
1314 check_removed_breakpoint (process);
1315
1316 if (process->status_pending_p)
1317 * (int *) flag_p = 1;
1318
1319 return 0;
1320 }
1321
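/* Handle a resume request from GDB.  Match each thread with its
   resume entry; if any thread due to be resumed already has an
   interesting status pending, only queue the requested signals,
   otherwise continue or step each thread as requested.  */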
1322 static void
1323 linux_resume (struct thread_resume *resume_info)
1324 {
1325 int pending_flag;
1326
1327 /* Yes, the use of a global here is rather ugly. */
1328 resume_ptr = resume_info;
1329
1330 for_each_inferior (&all_threads, linux_set_resume_request);
1331
1332 /* If there is a thread which would otherwise be resumed, which
1333 has a pending status, then don't resume any threads - we can just
1334 report the pending status. Make sure to queue any signals
1335 that would otherwise be sent. */
1336 pending_flag = 0;
1337 find_inferior (&all_processes, resume_status_pending_p, &pending_flag);
1338
1339 if (debug_threads)
1340 {
1341 if (pending_flag)
1342 fprintf (stderr, "Not resuming, pending status\n");
1343 else
1344 fprintf (stderr, "Resuming, no pending status\n");
1345 }
1346
1347 if (pending_flag)
1348 for_each_inferior (&all_threads, linux_queue_one_thread);
1349 else
1350 for_each_inferior (&all_threads, linux_continue_one_thread);
1351 }
1352
1353 #ifdef HAVE_LINUX_USRREGS
1354
1355 int
1356 register_addr (int regnum)
1357 {
1358 int addr;
1359
1360 if (regnum < 0 || regnum >= the_low_target.num_regs)
1361 error ("Invalid register number %d.", regnum);
1362
1363 addr = the_low_target.regmap[regnum];
1364
1365 return addr;
1366 }
1367
1368 /* Fetch one register. */
1369 static void
1370 fetch_register (int regno)
1371 {
1372 CORE_ADDR regaddr;
1373 int i, size;
1374 char *buf;
1375
1376 if (regno >= the_low_target.num_regs)
1377 return;
1378 if ((*the_low_target.cannot_fetch_register) (regno))
1379 return;
1380
1381 regaddr = register_addr (regno);
1382 if (regaddr == -1)
1383 return;
1384 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1385 & - sizeof (PTRACE_XFER_TYPE);
1386 buf = alloca (size);
1387 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1388 {
1389 errno = 0;
1390 *(PTRACE_XFER_TYPE *) (buf + i) =
1391 ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1392 regaddr += sizeof (PTRACE_XFER_TYPE);
1393 if (errno != 0)
1394 {
1395 /* Warning, not error, in case we are attached; sometimes the
1396 kernel doesn't let us at the registers. */
1397 char *err = strerror (errno);
1398 char *msg = alloca (strlen (err) + 128);
1399 sprintf (msg, "reading register %d: %s", regno, err);
1400 error (msg);
1401 goto error_exit;
1402 }
1403 }
1404 if (the_low_target.left_pad_xfer
1405 && register_size (regno) < sizeof (PTRACE_XFER_TYPE))
1406 supply_register (regno, (buf + sizeof (PTRACE_XFER_TYPE)
1407 - register_size (regno)));
1408 else
1409 supply_register (regno, buf);
1410
1411 error_exit:;
1412 }
1413
1414 /* Fetch all registers, or just one, from the child process. */
1415 static void
1416 usr_fetch_inferior_registers (int regno)
1417 {
1418 if (regno == -1 || regno == 0)
1419 for (regno = 0; regno < the_low_target.num_regs; regno++)
1420 fetch_register (regno);
1421 else
1422 fetch_register (regno);
1423 }
1424
1425 /* Store our register values back into the inferior.
1426 If REGNO is -1, do this for all registers.
1427 Otherwise, REGNO specifies which register (so we can save time). */
1428 static void
1429 usr_store_inferior_registers (int regno)
1430 {
1431 CORE_ADDR regaddr;
1432 int i, size;
1433 char *buf;
1434
1435 if (regno >= 0)
1436 {
1437 if (regno >= the_low_target.num_regs)
1438 return;
1439
1440 if ((*the_low_target.cannot_store_register) (regno) == 1)
1441 return;
1442
1443 regaddr = register_addr (regno);
1444 if (regaddr == -1)
1445 return;
1446 errno = 0;
1447 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1448 & - sizeof (PTRACE_XFER_TYPE);
1449 buf = alloca (size);
1450 memset (buf, 0, size);
1451 if (the_low_target.left_pad_xfer
1452 && register_size (regno) < sizeof (PTRACE_XFER_TYPE))
1453 collect_register (regno, (buf + sizeof (PTRACE_XFER_TYPE)
1454 - register_size (regno)));
1455 else
1456 collect_register (regno, buf);
1457 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1458 {
1459 errno = 0;
1460 ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
1461 *(PTRACE_XFER_TYPE *) (buf + i));
1462 if (errno != 0)
1463 {
1464 if ((*the_low_target.cannot_store_register) (regno) == 0)
1465 {
1466 char *err = strerror (errno);
1467 char *msg = alloca (strlen (err) + 128);
1468 sprintf (msg, "writing register %d: %s",
1469 regno, err);
1470 error (msg);
1471 return;
1472 }
1473 }
1474 regaddr += sizeof (PTRACE_XFER_TYPE);
1475 }
1476 }
1477 else
1478 for (regno = 0; regno < the_low_target.num_regs; regno++)
1479 usr_store_inferior_registers (regno);
1480 }
1481 #endif /* HAVE_LINUX_USRREGS */
1482
1483
1484
1485 #ifdef HAVE_LINUX_REGSETS
1486
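/* Fetch the inferior's registers using the regset interface.  Returns
   0 if the general registers were retrieved this way, 1 if the caller
   should fall back to PTRACE_PEEKUSER, and -1 if regsets are not
   usable at all.  */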
1487 static int
1488 regsets_fetch_inferior_registers ()
1489 {
1490 struct regset_info *regset;
1491 int saw_general_regs = 0;
1492
1493 regset = target_regsets;
1494
1495 while (regset->size >= 0)
1496 {
1497 void *buf;
1498 int res;
1499
1500 if (regset->size == 0)
1501 {
1502 regset ++;
1503 continue;
1504 }
1505
1506 buf = malloc (regset->size);
1507 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1508 if (res < 0)
1509 {
1510 if (errno == EIO)
1511 {
1512 /* If we get EIO on the first regset, do not try regsets again.
1513 If we get EIO on a later regset, disable that regset. */
1514 if (regset == target_regsets)
1515 {
1516 use_regsets_p = 0;
1517 return -1;
1518 }
1519 else
1520 {
1521 regset->size = 0;
1522 continue;
1523 }
1524 }
1525 else
1526 {
1527 char s[256];
1528 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
1529 inferior_pid);
1530 perror (s);
1531 }
1532 }
1533 else if (regset->type == GENERAL_REGS)
1534 saw_general_regs = 1;
1535 regset->store_function (buf);
1536 regset ++;
     free (buf);
1537 }
1538 if (saw_general_regs)
1539 return 0;
1540 else
1541 return 1;
1542 }
1543
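/* Store the inferior's registers using the regset interface.  Each
   regset is read back first so that fields not present in gdbserver's
   regcache are preserved.  Return values are as for
   regsets_fetch_inferior_registers.  */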
1544 static int
1545 regsets_store_inferior_registers ()
1546 {
1547 struct regset_info *regset;
1548 int saw_general_regs = 0;
1549
1550 regset = target_regsets;
1551
1552 while (regset->size >= 0)
1553 {
1554 void *buf;
1555 int res;
1556
1557 if (regset->size == 0)
1558 {
1559 regset ++;
1560 continue;
1561 }
1562
1563 buf = malloc (regset->size);
1564
1565 /* First fill the buffer with the current register set contents,
1566 in case there are any items in the kernel's regset that are
1567 not in gdbserver's regcache. */
1568 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1569
1570 if (res == 0)
1571 {
1572 /* Then overlay our cached registers on that. */
1573 regset->fill_function (buf);
1574
1575 /* Only now do we write the register set. */
1576 res = ptrace (regset->set_request, inferior_pid, 0, buf);
1577 }
1578
1579 if (res < 0)
1580 {
1581 if (errno == EIO)
1582 {
1583 /* If we get EIO on the first regset, do not try regsets again.
1584 If we get EIO on a later regset, disable that regset. */
1585 if (regset == target_regsets)
1586 {
1587 use_regsets_p = 0;
1588 return -1;
1589 }
1590 else
1591 {
1592 regset->size = 0;
1593 continue;
1594 }
1595 }
1596 else
1597 {
1598 perror ("Warning: ptrace(regsets_store_inferior_registers)");
1599 }
1600 }
1601 else if (regset->type == GENERAL_REGS)
1602 saw_general_regs = 1;
1603 regset ++;
1604 free (buf);
1605 }
1606 if (saw_general_regs)
1607 return 0;
1608 else
1609 return 1;
1611 }
1612
1613 #endif /* HAVE_LINUX_REGSETS */
1614
1615
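/* Fetch register REGNO, or all registers if REGNO is -1, preferring
   the regset interface and falling back to PTRACE_PEEKUSER.  */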
1616 void
1617 linux_fetch_registers (int regno)
1618 {
1619 #ifdef HAVE_LINUX_REGSETS
1620 if (use_regsets_p)
1621 {
1622 if (regsets_fetch_inferior_registers () == 0)
1623 return;
1624 }
1625 #endif
1626 #ifdef HAVE_LINUX_USRREGS
1627 usr_fetch_inferior_registers (regno);
1628 #endif
1629 }
1630
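/* Store register REGNO, or all registers if REGNO is -1, back into
   the inferior, preferring the regset interface and falling back to
   PTRACE_POKEUSER.  */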
1631 void
1632 linux_store_registers (int regno)
1633 {
1634 #ifdef HAVE_LINUX_REGSETS
1635 if (use_regsets_p)
1636 {
1637 if (regsets_store_inferior_registers () == 0)
1638 return;
1639 }
1640 #endif
1641 #ifdef HAVE_LINUX_USRREGS
1642 usr_store_inferior_registers (regno);
1643 #endif
1644 }
1645
1646
1647 /* Copy LEN bytes from inferior's memory starting at MEMADDR
1648 to debugger memory starting at MYADDR. */
1649
1650 static int
1651 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
1652 {
1653 register int i;
1654 /* Round starting address down to longword boundary. */
1655 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1656 /* Round ending address up; get number of longwords that makes. */
1657 register int count
1658 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
1659 / sizeof (PTRACE_XFER_TYPE);
1660 /* Allocate buffer of that many longwords. */
1661 register PTRACE_XFER_TYPE *buffer
1662 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1663 int fd;
1664 char filename[64];
1665
1666 /* Try using /proc. Don't bother for one word. */
1667 if (len >= 3 * sizeof (long))
1668 {
1669 /* We could keep this file open and cache it - possibly one per
1670 thread. That requires some juggling, but is even faster. */
1671 sprintf (filename, "/proc/%ld/mem", inferior_pid);
1672 fd = open (filename, O_RDONLY | O_LARGEFILE);
1673 if (fd == -1)
1674 goto no_proc;
1675
1676 /* If pread64 is available, use it. It's faster if the kernel
1677 supports it (only one syscall), and it's 64-bit safe even on
1678 32-bit platforms (for instance, SPARC debugging a SPARC64
1679 application). */
1680 #ifdef HAVE_PREAD64
1681 if (pread64 (fd, myaddr, len, memaddr) != len)
1682 #else
1683 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
1684 #endif
1685 {
1686 close (fd);
1687 goto no_proc;
1688 }
1689
1690 close (fd);
1691 return 0;
1692 }
1693
1694 no_proc:
1695 /* Read all the longwords */
1696 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1697 {
1698 errno = 0;
1699 buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, 0);
1700 if (errno)
1701 return errno;
1702 }
1703
1704 /* Copy appropriate bytes out of the buffer. */
1705 memcpy (myaddr, (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), len);
1706
1707 return 0;
1708 }
1709
1710 /* Copy LEN bytes of data from debugger memory at MYADDR
1711 to inferior's memory at MEMADDR.
1712 On failure (cannot write the inferior)
1713 returns the value of errno. */
1714
1715 static int
1716 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
1717 {
1718 register int i;
1719 /* Round starting address down to longword boundary. */
1720 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1721 /* Round ending address up; get number of longwords that makes. */
1722 register int count
1723 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
1724 /* Allocate buffer of that many longwords. */
1725 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1726 extern int errno;
1727
1728 if (debug_threads)
1729 {
1730 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
1731 }
1732
1733 /* Fill start and end extra bytes of buffer with existing memory data. */
1734
1735 buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1736 (PTRACE_ARG3_TYPE) addr, 0);
1737
1738 if (count > 1)
1739 {
1740 buffer[count - 1]
1741 = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1742 (PTRACE_ARG3_TYPE) (addr + (count - 1)
1743 * sizeof (PTRACE_XFER_TYPE)),
1744 0);
1745 }
1746
1747 /* Copy data to be written over corresponding part of buffer */
1748
1749 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
1750
1751 /* Write the entire buffer. */
1752
1753 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1754 {
1755 errno = 0;
1756 ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
1757 if (errno)
1758 return errno;
1759 }
1760
1761 return 0;
1762 }
1763
1764 static int linux_supports_tracefork_flag;
1765
1766 /* Helper functions for linux_test_for_tracefork, called via clone (). */
1767
1768 static int
1769 linux_tracefork_grandchild (void *arg)
1770 {
1771 _exit (0);
1772 }
1773
1774 #define STACK_SIZE 4096
1775
1776 static int
1777 linux_tracefork_child (void *arg)
1778 {
1779 ptrace (PTRACE_TRACEME, 0, 0, 0);
1780 kill (getpid (), SIGSTOP);
1781 #ifdef __ia64__
1782 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
1783 CLONE_VM | SIGCHLD, NULL);
1784 #else
1785 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
1786 CLONE_VM | SIGCHLD, NULL);
1787 #endif
1788 _exit (0);
1789 }
1790
1791 /* Wrapper function for waitpid which handles EINTR. */
1792
1793 static int
1794 my_waitpid (int pid, int *status, int flags)
1795 {
1796 int ret;
1797 do
1798 {
1799 ret = waitpid (pid, status, flags);
1800 }
1801 while (ret == -1 && errno == EINTR);
1802
1803 return ret;
1804 }
1805
1806 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
1807 sure that we can enable the option, and that it had the desired
1808 effect. */
1809
1810 static void
1811 linux_test_for_tracefork (void)
1812 {
1813 int child_pid, ret, status;
1814 long second_pid;
1815 char *stack = malloc (STACK_SIZE * 4);
1816
1817 linux_supports_tracefork_flag = 0;
1818
1819 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
1820 #ifdef __ia64__
1821 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
1822 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1823 #else
1824 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
1825 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1826 #endif
1827 if (child_pid == -1)
1828 perror_with_name ("clone");
1829
1830 ret = my_waitpid (child_pid, &status, 0);
1831 if (ret == -1)
1832 perror_with_name ("waitpid");
1833 else if (ret != child_pid)
1834 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
1835 if (! WIFSTOPPED (status))
1836 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
1837
1838 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
1839 if (ret != 0)
1840 {
1841 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1842 if (ret != 0)
1843 {
1844 warning ("linux_test_for_tracefork: failed to kill child");
1845 return;
1846 }
1847
1848 ret = my_waitpid (child_pid, &status, 0);
1849 if (ret != child_pid)
1850 warning ("linux_test_for_tracefork: failed to wait for killed child");
1851 else if (!WIFSIGNALED (status))
1852 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
1853 "killed child", status);
1854
1855 return;
1856 }
1857
1858 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
1859 if (ret != 0)
1860 warning ("linux_test_for_tracefork: failed to resume child");
1861
1862 ret = my_waitpid (child_pid, &status, 0);
1863
1864 if (ret == child_pid && WIFSTOPPED (status)
1865 && status >> 16 == PTRACE_EVENT_FORK)
1866 {
1867 second_pid = 0;
1868 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
1869 if (ret == 0 && second_pid != 0)
1870 {
1871 int second_status;
1872
1873 linux_supports_tracefork_flag = 1;
1874 my_waitpid (second_pid, &second_status, 0);
1875 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
1876 if (ret != 0)
1877 warning ("linux_test_for_tracefork: failed to kill second child");
1878 my_waitpid (second_pid, &status, 0);
1879 }
1880 }
1881 else
1882 warning ("linux_test_for_tracefork: unexpected result from waitpid "
1883 "(%d, status 0x%x)", ret, status);
1884
1885 do
1886 {
1887 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1888 if (ret != 0)
1889 warning ("linux_test_for_tracefork: failed to kill child");
1890 my_waitpid (child_pid, &status, 0);
1891 }
1892 while (WIFSTOPPED (status));
1893
1894 free (stack);
1895 }
1896
1897
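/* Called when new symbols may be available.  Use them to initialize
   libthread_db support if it is not already active.  */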
1898 static void
1899 linux_look_up_symbols (void)
1900 {
1901 #ifdef USE_THREAD_DB
1902 if (thread_db_active)
1903 return;
1904
1905 thread_db_active = thread_db_init (!linux_supports_tracefork_flag);
1906 #endif
1907 }
1908
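/* Interrupt the inferior with SIGINT: target the selected continue
   thread if there is one, otherwise the process GDB first connected
   to (signal_pid).  */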
1909 static void
1910 linux_request_interrupt (void)
1911 {
1912 extern unsigned long signal_pid;
1913
1914 if (cont_thread != 0 && cont_thread != -1)
1915 {
1916 struct process_info *process;
1917
1918 process = get_thread_process (current_inferior);
1919 kill_lwp (process->lwpid, SIGINT);
1920 }
1921 else
1922 kill_lwp (signal_pid, SIGINT);
1923 }
1924
1925 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
1926 to debugger memory starting at MYADDR. */
1927
1928 static int
1929 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
1930 {
1931 char filename[PATH_MAX];
1932 int fd, n;
1933
1934 snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);
1935
1936 fd = open (filename, O_RDONLY);
1937 if (fd < 0)
1938 return -1;
1939
1940 if (offset != (CORE_ADDR) 0
1941 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
1942 n = -1;
1943 else
1944 n = read (fd, myaddr, len);
1945
1946 close (fd);
1947
1948 return n;
1949 }
1950
1951 /* These watchpoint-related wrapper functions simply pass on the function call
1952 if the target has registered a corresponding function. */
1953
1954 static int
1955 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
1956 {
1957 if (the_low_target.insert_watchpoint != NULL)
1958 return the_low_target.insert_watchpoint (type, addr, len);
1959 else
1960 /* Unsupported (see target.h). */
1961 return 1;
1962 }
1963
1964 static int
1965 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
1966 {
1967 if (the_low_target.remove_watchpoint != NULL)
1968 return the_low_target.remove_watchpoint (type, addr, len);
1969 else
1970 /* Unsupported (see target.h). */
1971 return 1;
1972 }
1973
1974 static int
1975 linux_stopped_by_watchpoint (void)
1976 {
1977 if (the_low_target.stopped_by_watchpoint != NULL)
1978 return the_low_target.stopped_by_watchpoint ();
1979 else
1980 return 0;
1981 }
1982
1983 static CORE_ADDR
1984 linux_stopped_data_address (void)
1985 {
1986 if (the_low_target.stopped_data_address != NULL)
1987 return the_low_target.stopped_data_address ();
1988 else
1989 return 0;
1990 }
1991
1992 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
1993 #if defined(__mcoldfire__)
1994 /* These should really be defined in the kernel's ptrace.h header. */
1995 #define PT_TEXT_ADDR 49*4
1996 #define PT_DATA_ADDR 50*4
1997 #define PT_TEXT_END_ADDR 51*4
1998 #endif
1999
2000 /* Under uClinux, programs are loaded at non-zero offsets, which we need
2001 to tell gdb about. */
2002
2003 static int
2004 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2005 {
2006 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2007 unsigned long text, text_end, data;
2008 int pid = get_thread_process (current_inferior)->head.id;
2009
2010 errno = 0;
2011
2012 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2013 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2014 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2015
2016 if (errno == 0)
2017 {
2018 /* Both text and data offsets produced at compile-time (and so
2019 used by gdb) are relative to the beginning of the program,
2020 with the data segment immediately following the text segment.
2021 However, the actual runtime layout in memory may put the data
2022 somewhere else, so when we send gdb a data base-address, we
2023 use the real data base address and subtract the compile-time
2024 data base-address from it (which is just the length of the
2025 text segment). BSS immediately follows data in both
2026 cases. */
2027 *text_p = text;
2028 *data_p = data - (text_end - text);
2029
2030 return 1;
2031 }
2032 #endif
2033 return 0;
2034 }
2035 #endif
2036
2037 static const char *
2038 linux_arch_string (void)
2039 {
2040 return the_low_target.arch_string;
2041 }
2042
2043 static struct target_ops linux_target_ops = {
2044 linux_create_inferior,
2045 linux_attach,
2046 linux_kill,
2047 linux_detach,
2048 linux_join,
2049 linux_thread_alive,
2050 linux_resume,
2051 linux_wait,
2052 linux_fetch_registers,
2053 linux_store_registers,
2054 linux_read_memory,
2055 linux_write_memory,
2056 linux_look_up_symbols,
2057 linux_request_interrupt,
2058 linux_read_auxv,
2059 linux_insert_watchpoint,
2060 linux_remove_watchpoint,
2061 linux_stopped_by_watchpoint,
2062 linux_stopped_data_address,
2063 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2064 linux_read_offsets,
2065 #else
2066 NULL,
2067 #endif
2068 #ifdef USE_THREAD_DB
2069 thread_db_get_tls_address,
2070 #else
2071 NULL,
2072 #endif
2073 linux_arch_string,
2074 NULL,
2075 hostio_last_error_from_errno,
2076 };
2077
2078 static void
2079 linux_init_signals ()
2080 {
2081 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
2082 to find what the cancel signal actually is. */
2083 signal (__SIGRTMIN+1, SIG_IGN);
2084 }
2085
2086 void
2087 initialize_low (void)
2088 {
2089 thread_db_active = 0;
2090 set_target_ops (&linux_target_ops);
2091 set_breakpoint_data (the_low_target.breakpoint,
2092 the_low_target.breakpoint_len);
2093 linux_init_signals ();
2094 linux_test_for_tracefork ();
2095 }