1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40
41 #ifndef PTRACE_GETSIGINFO
42 # define PTRACE_GETSIGINFO 0x4202
43 # define PTRACE_SETSIGINFO 0x4203
44 #endif
45
46 #ifndef O_LARGEFILE
47 #define O_LARGEFILE 0
48 #endif
49
50 /* If the system headers did not provide the constants, hard-code the normal
51 values. */
52 #ifndef PTRACE_EVENT_FORK
53
54 #define PTRACE_SETOPTIONS 0x4200
55 #define PTRACE_GETEVENTMSG 0x4201
56
57 /* options set using PTRACE_SETOPTIONS */
58 #define PTRACE_O_TRACESYSGOOD 0x00000001
59 #define PTRACE_O_TRACEFORK 0x00000002
60 #define PTRACE_O_TRACEVFORK 0x00000004
61 #define PTRACE_O_TRACECLONE 0x00000008
62 #define PTRACE_O_TRACEEXEC 0x00000010
63 #define PTRACE_O_TRACEVFORKDONE 0x00000020
64 #define PTRACE_O_TRACEEXIT 0x00000040
65
66 /* Wait extended result codes for the above trace options. */
67 #define PTRACE_EVENT_FORK 1
68 #define PTRACE_EVENT_VFORK 2
69 #define PTRACE_EVENT_CLONE 3
70 #define PTRACE_EVENT_EXEC 4
71 #define PTRACE_EVENT_VFORK_DONE 5
72 #define PTRACE_EVENT_EXIT 6
73
74 #endif /* PTRACE_EVENT_FORK */
75
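/* For reference, a minimal sketch of how these extended events arrive
   (it mirrors the checks in linux_wait_for_event and handle_extended_wait
   below): with PTRACE_O_TRACECLONE set, the kernel reports the event as a
   SIGTRAP stop whose waitpid status carries the event code in its high bits:

       if (WIFSTOPPED (wstat)
	   && WSTOPSIG (wstat) == SIGTRAP
	   && (wstat >> 16) != 0)
	 handle_extended_wait (event_child, wstat);

   so (wstat >> 16) yields PTRACE_EVENT_CLONE and friends.  */
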
76 /* We can't always assume that this flag is available, but all systems
77 with the ptrace event handlers also have __WALL, so it's safe to use
78 in some contexts. */
79 #ifndef __WALL
80 #define __WALL 0x40000000 /* Wait for any child. */
81 #endif
82
83 #ifdef __UCLIBC__
84 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
85 #define HAS_NOMMU
86 #endif
87 #endif
88
89 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
90 representation of the thread ID.
91
92 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
93 the same as the LWP ID. */
94
95 struct inferior_list all_lwps;
96
97 /* A list of all unknown processes which receive stop signals. Some other
98 process will presumably claim each of these as forked children
99 momentarily. */
100
101 struct inferior_list stopped_pids;
102
103 /* FIXME this is a bit of a hack, and could be removed. */
104 int stopping_threads;
105
106 /* FIXME make into a target method? */
107 int using_threads = 1;
108 static int thread_db_active;
109
110 static int must_set_ptrace_flags;
111
112 /* This flag is true iff we've just created or attached to a new inferior
113 but it has not stopped yet. As soon as it does, we need to call the
114 low target's arch_setup callback. */
115 static int new_inferior;
116
117 static void linux_resume_one_lwp (struct inferior_list_entry *entry,
118 int step, int signal, siginfo_t *info);
119 static void linux_resume (struct thread_resume *resume_info, size_t n);
120 static void stop_all_lwps (void);
121 static int linux_wait_for_event (int pid, int *wstat, int options);
122 static int check_removed_breakpoint (struct lwp_info *event_child);
123 static void *add_lwp (unsigned long pid);
124 static int my_waitpid (int pid, int *status, int flags);
125 static int linux_stopped_by_watchpoint (void);
126
127 struct pending_signals
128 {
129 int signal;
130 siginfo_t info;
131 struct pending_signals *prev;
132 };
133
134 #define PTRACE_ARG3_TYPE long
135 #define PTRACE_XFER_TYPE long
136
137 #ifdef HAVE_LINUX_REGSETS
138 static char *disabled_regsets;
139 static int num_regsets;
140 #endif
141
142 /* FIXME: Delete eventually. */
143 #define inferior_pid (lwpid_of (get_thread_lwp (current_inferior)))
144
145 /* The read/write ends of the pipe registered as waitable file in the
146 event loop. */
147 static int linux_event_pipe[2] = { -1, -1 };
148
149 /* True if we're currently in async mode. */
150 #define target_is_async_p() (linux_event_pipe[0] != -1)
151
152 static void send_sigstop (struct inferior_list_entry *entry);
153 static void wait_for_sigstop (struct inferior_list_entry *entry);
154
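/* Remove LWP from our tables: delete its thread, take it off the all_lwps
   list, and free it.  */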
155 static void
156 delete_lwp (struct lwp_info *lwp)
157 {
158 remove_thread (get_lwp_thread (lwp));
159 remove_inferior (&all_lwps, &lwp->head);
160 free (lwp);
161 }
162
163 /* Handle a GNU/Linux extended wait response. If we see a clone
164 event, we need to add the new LWP to our list (and not report the
165 trap to higher layers). */
166
167 static void
168 handle_extended_wait (struct lwp_info *event_child, int wstat)
169 {
170 int event = wstat >> 16;
171 struct lwp_info *new_lwp;
172
173 if (event == PTRACE_EVENT_CLONE)
174 {
175 unsigned long new_pid;
176 int ret, status = W_STOPCODE (SIGSTOP);
177
178 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
179
180 /* If we haven't already seen the new PID stop, wait for it now. */
181 if (! pull_pid_from_list (&stopped_pids, new_pid))
182 {
183 /* The new child has a pending SIGSTOP. We can't affect it until it
184 hits the SIGSTOP, but we're already attached. */
185
186 ret = my_waitpid (new_pid, &status, __WALL);
187
188 if (ret == -1)
189 perror_with_name ("waiting for new child");
190 else if (ret != new_pid)
191 warning ("wait returned unexpected PID %d", ret);
192 else if (!WIFSTOPPED (status))
193 warning ("wait returned unexpected status 0x%x", status);
194 }
195
196 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
197
198 new_lwp = (struct lwp_info *) add_lwp (new_pid);
199 add_thread (new_pid, new_lwp, new_pid);
200 new_thread_notify (thread_id_to_gdb_id (lwpid_of (new_lwp)));
201
202 /* Normally we will get the pending SIGSTOP. But in some cases
203 we might get another signal delivered to the group first.
204 If we do get another signal, be sure not to lose it. */
205 if (WSTOPSIG (status) == SIGSTOP)
206 {
207 if (stopping_threads)
208 new_lwp->stopped = 1;
209 else
210 ptrace (PTRACE_CONT, new_pid, 0, 0);
211 }
212 else
213 {
214 new_lwp->stop_expected = 1;
215 if (stopping_threads)
216 {
217 new_lwp->stopped = 1;
218 new_lwp->status_pending_p = 1;
219 new_lwp->status_pending = status;
220 }
221 else
222 /* Pass the signal on. This is what GDB does - except
223 shouldn't we really report it instead? */
224 ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
225 }
226
227 /* Always resume the current thread. If we are stopping
228 threads, it will have a pending SIGSTOP; we may as well
229 collect it now. */
230 linux_resume_one_lwp (&event_child->head,
231 event_child->stepping, 0, NULL);
232 }
233 }
234
235 /* This function should only be called if the process got a SIGTRAP.
236 The SIGTRAP could mean several things.
237
238 On i386, where decr_pc_after_break is non-zero:
239 If we were single-stepping this process using PTRACE_SINGLESTEP,
240 we will get only the one SIGTRAP (even if the instruction we
241 stepped over was a breakpoint). The value of $eip will be the
242 next instruction.
243 If we continue the process using PTRACE_CONT, we will get a
244 SIGTRAP when we hit a breakpoint. The value of $eip will be
245 the instruction after the breakpoint (i.e. needs to be
246 decremented). If we report the SIGTRAP to GDB, we must also
247 report the undecremented PC. If we cancel the SIGTRAP, we
248 must resume at the decremented PC.
249
250 (Presumably, not yet tested) On a non-decr_pc_after_break machine
251 with hardware or kernel single-step:
252 If we single-step over a breakpoint instruction, our PC will
253 point at the following instruction. If we continue and hit a
254 breakpoint instruction, our PC will point at the breakpoint
255 instruction. */
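/* Concretely, on i386 (where decr_pc_after_break is 1, the size of the
   int3 breakpoint instruction): if a breakpoint at address ADDR is hit
   under PTRACE_CONT, the kernel reports $eip == ADDR + 1 and get_stop_pc
   below returns ADDR; under PTRACE_SINGLESTEP no adjustment is made.  */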
256
257 static CORE_ADDR
258 get_stop_pc (void)
259 {
260 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
261
262 if (get_thread_lwp (current_inferior)->stepping)
263 return stop_pc;
264 else
265 return stop_pc - the_low_target.decr_pc_after_break;
266 }
267
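/* Allocate a new, zero-filled lwp_info for PID, put it on the all_lwps
   list, and return it.  */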
268 static void *
269 add_lwp (unsigned long pid)
270 {
271 struct lwp_info *lwp;
272
273 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
274 memset (lwp, 0, sizeof (*lwp));
275
276 lwp->head.id = pid;
277
278 add_inferior_to_list (&all_lwps, &lwp->head);
279
280 return lwp;
281 }
282
283 /* Start an inferior process and return its pid.
284 ALLARGS is a vector of program-name and args. */
285
286 static int
287 linux_create_inferior (char *program, char **allargs)
288 {
289 void *new_lwp;
290 int pid;
291
292 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
293 pid = vfork ();
294 #else
295 pid = fork ();
296 #endif
297 if (pid < 0)
298 perror_with_name ("fork");
299
300 if (pid == 0)
301 {
302 ptrace (PTRACE_TRACEME, 0, 0, 0);
303
304 signal (__SIGRTMIN + 1, SIG_DFL);
305
306 setpgid (0, 0);
307
308 execv (program, allargs);
309 if (errno == ENOENT)
310 execvp (program, allargs);
311
312 fprintf (stderr, "Cannot exec %s: %s.\n", program,
313 strerror (errno));
314 fflush (stderr);
315 _exit (0177);
316 }
317
318 new_lwp = add_lwp (pid);
319 add_thread (pid, new_lwp, pid);
320 must_set_ptrace_flags = 1;
321 new_inferior = 1;
322
323 return pid;
324 }
325
326 /* Attach to an inferior process. */
327
328 void
329 linux_attach_lwp (unsigned long pid)
330 {
331 struct lwp_info *new_lwp;
332
333 if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
334 {
335 if (all_threads.head != NULL)
336 {
337 /* If we fail to attach to an LWP, just warn. */
338 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", pid,
339 strerror (errno), errno);
340 fflush (stderr);
341 return;
342 }
343 else
344 /* If we fail to attach to a process, report an error. */
345 error ("Cannot attach to lwp %ld: %s (%d)\n", pid,
346 strerror (errno), errno);
347 }
348
349 /* FIXME: This intermittently fails.
350 We need to wait for SIGSTOP first. */
351 ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACECLONE);
352
353 new_lwp = (struct lwp_info *) add_lwp (pid);
354 add_thread (pid, new_lwp, pid);
355 new_thread_notify (thread_id_to_gdb_id (lwpid_of (new_lwp)));
356
357 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
358 brings it to a halt.
359
360 There are several cases to consider here:
361
362 1) gdbserver has already attached to the process and is being notified
363 of a new thread that is being created.
364 In this case we should ignore that SIGSTOP and resume the process.
365 This is handled below by setting stop_expected = 1.
366
367 2) This is the first thread (the process thread), and we're attaching
368 to it via attach_inferior.
369 In this case we want the process thread to stop.
370 This is handled by having linux_attach clear stop_expected after
371 we return.
372 ??? If the process already has several threads we leave the other
373 threads running.
374
375 3) GDB is connecting to gdbserver and is requesting an enumeration of all
376 existing threads.
377 In this case we want the thread to stop.
378 FIXME: This case is currently not properly handled.
379 We should wait for the SIGSTOP but don't. Things work apparently
380 because enough time passes between when we ptrace (ATTACH) and when
381 gdb makes the next ptrace call on the thread.
382
383 On the other hand, if we are currently trying to stop all threads, we
384 should treat the new thread as if we had sent it a SIGSTOP. This works
385 because we are guaranteed that the add_lwp call above added us to the
386 end of the list, and so the new thread has not yet reached
387 wait_for_sigstop (but will). */
388 if (! stopping_threads)
389 new_lwp->stop_expected = 1;
390 }
391
392 int
393 linux_attach (unsigned long pid)
394 {
395 struct lwp_info *lwp;
396
397 linux_attach_lwp (pid);
398
399 if (!non_stop)
400 {
401 /* Don't ignore the initial SIGSTOP if we just attached to this
402 process. It will be collected by wait shortly. */
403 lwp = (struct lwp_info *) find_inferior_id (&all_lwps, pid);
404 lwp->stop_expected = 0;
405 }
406
407 new_inferior = 1;
408
409 return 0;
410 }
411
412 /* Kill the inferior process. Make us have no inferior. */
413
414 static void
415 linux_kill_one_lwp (struct inferior_list_entry *entry)
416 {
417 struct thread_info *thread = (struct thread_info *) entry;
418 struct lwp_info *lwp = get_thread_lwp (thread);
419 int pid;
420 int wstat;
421
422 /* We avoid killing the first thread here, because of a Linux kernel (at
423 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
424 the children get a chance to be reaped, it will remain a zombie
425 forever. */
426 if (entry == all_threads.head)
427 return;
428
429 /* If we're killing a running inferior, make sure it is stopped
430 first, as PTRACE_KILL will not work otherwise. */
431 if (!lwp->stopped)
432 send_sigstop (&lwp->head);
433
434 do
435 {
436 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
437
438 /* Make sure it died. The loop is most likely unnecessary. */
439 pid = linux_wait_for_event (lwpid_of (lwp), &wstat, __WALL);
440 } while (pid > 0 && WIFSTOPPED (wstat));
441 }
442
443 static void
444 linux_kill (void)
445 {
446 struct thread_info *thread = (struct thread_info *) all_threads.head;
447 struct lwp_info *lwp;
448 int wstat;
449 int pid;
450
451 if (thread == NULL)
452 return;
453
454 for_each_inferior (&all_threads, linux_kill_one_lwp);
455
456 /* See the comment in linux_kill_one_lwp. We did not kill the first
457 thread in the list, so do so now. */
458 lwp = get_thread_lwp (thread);
459
460 if (debug_threads)
461 fprintf (stderr, "lk_1: killing lwp %ld\n", lwpid_of (lwp));
462
463 /* If we're killing a running inferior, make sure it is stopped
464 first, as PTRACE_KILL will not work otherwise. */
465 if (!lwp->stopped)
466 send_sigstop (&lwp->head);
467
468 do
469 {
470 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
471
472 /* Make sure it died. The loop is most likely unnecessary. */
473 pid = linux_wait_for_event (lwpid_of (lwp), &wstat, __WALL);
474 } while (pid > 0 && WIFSTOPPED (wstat));
475
476 delete_lwp (lwp);
477 clear_inferiors ();
478 }
479
480 static void
481 linux_detach_one_lwp (struct inferior_list_entry *entry)
482 {
483 struct thread_info *thread = (struct thread_info *) entry;
484 struct lwp_info *lwp = get_thread_lwp (thread);
485
486 /* If we're detaching from a running inferior, make sure it is
487 stopped first, as PTRACE_DETACH will not work otherwise. */
488 if (!lwp->stopped)
489 {
490 int pid = lwpid_of (lwp);
491
492 stopping_threads = 1;
493 send_sigstop (&lwp->head);
494
495 /* If this detects a new thread through a clone event, the new
496 thread is appended to the end of the lwp list, so we'll
497 eventually detach from it. */
498 wait_for_sigstop (&lwp->head);
499 stopping_threads = 0;
500
501 /* If LWP exits while we're trying to stop it, there's nothing
502 left to do. */
503 lwp = (struct lwp_info *) find_inferior_id (&all_lwps, pid);
504 if (lwp == NULL)
505 return;
506 }
507
508 /* Make sure the process isn't stopped at a breakpoint that's
509 no longer there. */
510 check_removed_breakpoint (lwp);
511
512 /* If this process is stopped but is expecting a SIGSTOP, then make
513 sure we take care of that now. This isn't absolutely guaranteed
514 to collect the SIGSTOP, but is fairly likely to. */
515 if (lwp->stop_expected)
516 {
517 int wstat;
518 /* Clear stop_expected, so that the SIGSTOP will be reported. */
519 lwp->stop_expected = 0;
520 if (lwp->stopped)
521 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
522 linux_wait_for_event (lwpid_of (lwp), &wstat, __WALL);
523 }
524
525 /* Flush any pending changes to the process's registers. */
526 regcache_invalidate_one ((struct inferior_list_entry *)
527 get_lwp_thread (lwp));
528
529 /* Finally, let it resume. */
530 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
531
532 delete_lwp (lwp);
533 }
534
535 static int
536 linux_detach (void)
537 {
538 delete_all_breakpoints ();
539 for_each_inferior (&all_threads, linux_detach_one_lwp);
540 clear_inferiors ();
541 return 0;
542 }
543
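/* Block until the inferior process exits, reaping it so that no zombie
   is left behind.  */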
544 static void
545 linux_join (void)
546 {
547 int status, ret;
548 struct thread_info *thread;
549 struct lwp_info *lwp;
550
551 thread = (struct thread_info *) all_threads.head;
552 lwp = get_thread_lwp (thread);
553
554 do {
555 ret = my_waitpid (lwpid_of (lwp), &status, 0);
556 if (WIFEXITED (status) || WIFSIGNALED (status))
557 break;
558 } while (ret != -1 || errno != ECHILD);
559 }
560
561 /* Return nonzero if the given thread is still alive. */
562 static int
563 linux_thread_alive (unsigned long lwpid)
564 {
565 if (find_inferior_id (&all_threads, lwpid) != NULL)
566 return 1;
567 else
568 return 0;
569 }
570
571 /* Return nonzero if this process stopped at a breakpoint which
572 no longer appears to be inserted. Also adjust the PC
573 appropriately to resume where the breakpoint used to be. */
574 static int
575 check_removed_breakpoint (struct lwp_info *event_child)
576 {
577 CORE_ADDR stop_pc;
578 struct thread_info *saved_inferior;
579
580 if (event_child->pending_is_breakpoint == 0)
581 return 0;
582
583 if (debug_threads)
584 fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
585 lwpid_of (event_child));
586
587 saved_inferior = current_inferior;
588 current_inferior = get_lwp_thread (event_child);
589
590 stop_pc = get_stop_pc ();
591
592 /* If the PC has changed since we stopped, then we shouldn't do
593 anything. This happens if, for instance, GDB handled the
594 decr_pc_after_break subtraction itself. */
595 if (stop_pc != event_child->pending_stop_pc)
596 {
597 if (debug_threads)
598 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
599 event_child->pending_stop_pc);
600
601 event_child->pending_is_breakpoint = 0;
602 current_inferior = saved_inferior;
603 return 0;
604 }
605
606 /* If the breakpoint is still there, we will report hitting it. */
607 if ((*the_low_target.breakpoint_at) (stop_pc))
608 {
609 if (debug_threads)
610 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
611 current_inferior = saved_inferior;
612 return 0;
613 }
614
615 if (debug_threads)
616 fprintf (stderr, "Removed breakpoint.\n");
617
618 /* For decr_pc_after_break targets, here is where we perform the
619 decrement. We go immediately from this function to resuming,
620 and can not safely call get_stop_pc () again. */
621 if (the_low_target.set_pc != NULL)
622 (*the_low_target.set_pc) (stop_pc);
623
624 /* We consumed the pending SIGTRAP. */
625 event_child->pending_is_breakpoint = 0;
626 event_child->status_pending_p = 0;
627 event_child->status_pending = 0;
628
629 current_inferior = saved_inferior;
630 return 1;
631 }
632
633 /* Return 1 if this lwp has an interesting status pending. This
634 function may silently resume an inferior lwp. */
635 static int
636 status_pending_p (struct inferior_list_entry *entry, void *dummy)
637 {
638 struct lwp_info *lwp = (struct lwp_info *) entry;
639
640 if (lwp->status_pending_p && !lwp->suspended)
641 if (check_removed_breakpoint (lwp))
642 {
643 /* This thread was stopped at a breakpoint, and the breakpoint
644 is now gone. We were told to continue (or step...) all threads,
645 so GDB isn't trying to single-step past this breakpoint.
646 So instead of reporting the old SIGTRAP, pretend we got to
647 the breakpoint just after it was removed instead of just
648 before; resume the process. */
649 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
650 return 0;
651 }
652
653 return (lwp->status_pending_p && !lwp->suspended);
654 }
655
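/* Call waitpid for LWP PID (or for any LWP if PID is -1), map the result
   back to our lwp_info, and record the raw wait status in it.  Returns
   NULL if OPTIONS contains WNOHANG and no stop was pending.  */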
656 static struct lwp_info *
657 linux_wait_for_lwp (int pid, int *wstatp, int options)
658 {
659 int ret;
660 int to_wait_for = pid;
661 struct lwp_info *child = NULL;
662
663 if (debug_threads)
664 fprintf (stderr, "linux_wait_for_lwp: %d\n", pid);
665
666 options |= __WALL;
667
668 retry:
669
670 ret = my_waitpid (to_wait_for, wstatp, options);
671 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
672 return NULL;
673 else if (ret == -1)
674 perror_with_name ("waitpid");
675
676 if (debug_threads
677 && (!WIFSTOPPED (*wstatp)
678 || (WSTOPSIG (*wstatp) != 32
679 && WSTOPSIG (*wstatp) != 33)))
680 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
681
682 child = (struct lwp_info *) find_inferior_id (&all_lwps, ret);
683
684 /* If we didn't find a process, one of two things presumably happened:
685 - A process we started and then detached from has exited. Ignore it.
686 - A process we are controlling has forked and the new child's stop
687 was reported to us by the kernel. Save its PID. */
688 if (child == NULL && WIFSTOPPED (*wstatp))
689 {
690 add_pid_to_list (&stopped_pids, ret);
691 goto retry;
692 }
693 else if (child == NULL)
694 goto retry;
695
696 child->stopped = 1;
697 child->pending_is_breakpoint = 0;
698
699 child->last_status = *wstatp;
700
701 /* Architecture-specific setup after inferior is running.
702 This needs to happen after we have attached to the inferior
703 and it is stopped for the first time, but before we access
704 any inferior registers. */
705 if (new_inferior)
706 {
707 the_low_target.arch_setup ();
708 #ifdef HAVE_LINUX_REGSETS
709 memset (disabled_regsets, 0, num_regsets);
710 #endif
711 new_inferior = 0;
712 }
713
714 if (debug_threads
715 && WIFSTOPPED (*wstatp))
716 {
717 struct thread_info *saved_inferior = current_inferior;
718 current_inferior = (struct thread_info *)
719 find_inferior_id (&all_threads, lwpid_of (child));
720 /* For testing only; i386_stop_pc prints out a diagnostic. */
721 if (the_low_target.get_pc != NULL)
722 get_stop_pc ();
723 current_inferior = saved_inferior;
724 }
725
726 return child;
727 }
728
729 /* Wait for an event from child PID. If PID is -1, wait for any
730 child. Store the stop status through the status pointer WSTAT.
731 OPTIONS is passed to the waitpid call. Return 0 if no child stop
732 event was found and OPTIONS contains WNOHANG. Return the PID of
733 the stopped child otherwise. */
734
735 static int
736 linux_wait_for_event (int pid, int *wstat, int options)
737 {
738 CORE_ADDR stop_pc;
739 struct lwp_info *event_child = NULL;
740 int bp_status;
741 struct lwp_info *requested_child = NULL;
742
743 /* Check for a process with a pending status. */
744 /* It is possible that the user changed the pending task's registers since
745 it stopped. We correctly handle the change of PC if we hit a breakpoint
746 (in check_removed_breakpoint); signals should be reported anyway. */
747
748 if (pid == -1)
749 {
750 event_child = (struct lwp_info *)
751 find_inferior (&all_lwps, status_pending_p, NULL);
752 if (debug_threads && event_child)
753 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
754 }
755 else
756 {
757 requested_child = (struct lwp_info *)
758 find_inferior_id (&all_lwps, pid);
759 if (requested_child->status_pending_p
760 && !check_removed_breakpoint (requested_child))
761 event_child = requested_child;
762 }
763
764 if (event_child != NULL)
765 {
766 if (debug_threads)
767 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
768 lwpid_of (event_child), event_child->status_pending);
769 *wstat = event_child->status_pending;
770 event_child->status_pending_p = 0;
771 event_child->status_pending = 0;
772 current_inferior = get_lwp_thread (event_child);
773 return lwpid_of (event_child);
774 }
775
776 /* We only enter this loop if no process has a pending wait status. Thus
777 any action taken in response to a wait status inside this loop is
778 responding as soon as we detect the status, not after any pending
779 events. */
780 while (1)
781 {
782 event_child = linux_wait_for_lwp (pid, wstat, options);
783
784 if ((options & WNOHANG) && event_child == NULL)
785 return 0;
786
787 if (event_child == NULL)
788 error ("event from unknown child");
789
790 current_inferior = get_lwp_thread (event_child);
791
792 /* Check for thread exit. */
793 if (! WIFSTOPPED (*wstat))
794 {
795 int lwpid = lwpid_of (event_child);
796 if (debug_threads)
797 fprintf (stderr, "LWP %d exiting\n", lwpid);
798
799 /* If the last thread is exiting, just return. */
800 if (all_threads.head == all_threads.tail)
801 {
802 if (debug_threads)
803 fprintf (stderr, "LWP %d is last lwp of process\n", lwpid);
804 return lwpid_of (event_child);
805 }
806
807 dead_thread_notify (thread_id_to_gdb_id (lwpid_of (event_child)));
808 delete_lwp (event_child);
809
810 if (!non_stop)
811 {
812 current_inferior = (struct thread_info *) all_threads.head;
813 if (debug_threads)
814 fprintf (stderr, "Current inferior is now %ld\n",
815 lwpid_of (get_thread_lwp (current_inferior)));
816 }
817 else
818 {
819 current_inferior = NULL;
820 if (debug_threads)
821 fprintf (stderr, "Current inferior is now <NULL>\n");
822 }
823
824 /* If we were waiting for this particular child to do something...
825 well, it did something. */
826 if (requested_child != NULL)
827 return lwpid;
828
829 /* Wait for a more interesting event. */
830 continue;
831 }
832
833 if (WIFSTOPPED (*wstat)
834 && WSTOPSIG (*wstat) == SIGSTOP
835 && event_child->stop_expected)
836 {
837 if (debug_threads)
838 fprintf (stderr, "Expected stop.\n");
839 event_child->stop_expected = 0;
840 linux_resume_one_lwp (&event_child->head,
841 event_child->stepping, 0, NULL);
842 continue;
843 }
844
845 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
846 && *wstat >> 16 != 0)
847 {
848 handle_extended_wait (event_child, *wstat);
849 continue;
850 }
851
852 /* If GDB is not interested in this signal, don't stop other
853 threads, and don't report it to GDB. Just resume the
854 inferior right away. We do this for threading-related
855 signals as well as any that GDB specifically requested we
856 ignore. But never ignore SIGSTOP if we sent it ourselves,
857 and do not ignore signals when stepping - they may require
858 special handling to skip the signal handler. */
859 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
860 thread library? */
861 if (WIFSTOPPED (*wstat)
862 && !event_child->stepping
863 && (
864 #ifdef USE_THREAD_DB
865 (thread_db_active
866 && (WSTOPSIG (*wstat) == __SIGRTMIN
867 || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
868 ||
869 #endif
870 (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
871 && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
872 {
873 siginfo_t info, *info_p;
874
875 if (debug_threads)
876 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
877 WSTOPSIG (*wstat), lwpid_of (event_child));
878
879 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
880 info_p = &info;
881 else
882 info_p = NULL;
883 linux_resume_one_lwp (&event_child->head,
884 event_child->stepping,
885 WSTOPSIG (*wstat), info_p);
886 continue;
887 }
888
889 /* If this event was not handled above, and is not a SIGTRAP, report
890 it. */
891 if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
892 return lwpid_of (event_child);
893
894 /* If this target does not support breakpoints, we simply report the
895 SIGTRAP; it's of no concern to us. */
896 if (the_low_target.get_pc == NULL)
897 return lwpid_of (event_child);
898
899 stop_pc = get_stop_pc ();
900
901 /* bp_reinsert will only be set if we were single-stepping.
902 Notice that we will resume the process after hitting
903 a gdbserver breakpoint; single-stepping to/over one
904 is not supported (yet). */
905 if (event_child->bp_reinsert != 0)
906 {
907 if (debug_threads)
908 fprintf (stderr, "Reinserted breakpoint.\n");
909 reinsert_breakpoint (event_child->bp_reinsert);
910 event_child->bp_reinsert = 0;
911
912 /* Clear the single-stepping flag and SIGTRAP as we resume. */
913 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
914 continue;
915 }
916
917 bp_status = check_breakpoints (stop_pc);
918
919 if (bp_status != 0)
920 {
921 if (debug_threads)
922 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
923
924 /* We hit one of our own breakpoints. We mark it as a pending
925 breakpoint, so that check_removed_breakpoint () will do the PC
926 adjustment for us at the appropriate time. */
927 event_child->pending_is_breakpoint = 1;
928 event_child->pending_stop_pc = stop_pc;
929
930 /* We may need to put the breakpoint back. We continue in the event
931 loop instead of simply replacing the breakpoint right away,
932 in order to not lose signals sent to the thread that hit the
933 breakpoint. Unfortunately this increases the window where another
934 thread could sneak past the removed breakpoint. For the current
935 use of server-side breakpoints (thread creation) this is
936 acceptable; but it needs to be considered before this breakpoint
937 mechanism can be used in more general ways. For some breakpoints
938 it may be necessary to stop all other threads, but that should
939 be avoided where possible.
940
941 If breakpoint_reinsert_addr is NULL, that means that we can
942 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
943 mark it for reinsertion, and single-step.
944
945 Otherwise, call the target function to figure out where we need
946 our temporary breakpoint, create it, and continue executing this
947 process. */
948
949 /* NOTE: we're lifting breakpoints in non-stop mode. This
950 is currently only used for thread event breakpoints, so
951 it isn't that bad as long as we have PTRACE_EVENT_CLONE
952 events. */
953 if (bp_status == 2)
954 /* No need to reinsert. */
955 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
956 else if (the_low_target.breakpoint_reinsert_addr == NULL)
957 {
958 event_child->bp_reinsert = stop_pc;
959 uninsert_breakpoint (stop_pc);
960 linux_resume_one_lwp (&event_child->head, 1, 0, NULL);
961 }
962 else
963 {
964 reinsert_breakpoint_by_bp
965 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
966 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
967 }
968
969 continue;
970 }
971
972 if (debug_threads)
973 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
974
975 /* If we were single-stepping, we definitely want to report the
976 SIGTRAP. Although the single-step operation has completed,
977 	 do not clear the stepping flag yet; we need to check it
978 in wait_for_sigstop. */
979 if (event_child->stepping)
980 return lwpid_of (event_child);
981
982 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
983 Check if it is a breakpoint, and if so mark the process information
984 accordingly. This will handle both the necessary fiddling with the
985 PC on decr_pc_after_break targets and suppressing extra threads
986 hitting a breakpoint if two hit it at once and then GDB removes it
987 after the first is reported. Arguably it would be better to report
988 multiple threads hitting breakpoints simultaneously, but the current
989 remote protocol does not allow this. */
990 if ((*the_low_target.breakpoint_at) (stop_pc))
991 {
992 event_child->pending_is_breakpoint = 1;
993 event_child->pending_stop_pc = stop_pc;
994 }
995
996 return lwpid_of (event_child);
997 }
998
999 /* NOTREACHED */
1000 return 0;
1001 }
1002
1003 /* Wait for an event from the inferior; fill in OURSTATUS and return the LWP id the event refers to.  */
1004
1005 static unsigned long
1006 linux_wait_1 (struct target_waitstatus *ourstatus, int target_options)
1007 {
1008 int w;
1009 struct thread_info *thread = NULL;
1010 struct lwp_info *lwp = NULL;
1011 int options;
1012 int wait_pid = -1;
1013 int pid;
1014
1015 /* Translate generic target options into linux options. */
1016 options = __WALL;
1017 if (target_options & TARGET_WNOHANG)
1018 options |= WNOHANG;
1019
1020 retry:
1021 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1022
1023 /* If we were only supposed to resume one thread, only wait for
1024 that thread - if it's still alive. If it died, however - which
1025 can happen if we're coming from the thread death case below -
1026 then we need to make sure we restart the other threads. We could
1027 pick a thread at random or restart all; restarting all is less
1028 arbitrary. */
1029 if (!non_stop && cont_thread != 0 && cont_thread != -1)
1030 {
1031 thread = (struct thread_info *) find_inferior_id (&all_threads,
1032 cont_thread);
1033
1034 /* No stepping, no signal - unless one is pending already, of course. */
1035 if (thread == NULL)
1036 {
1037 struct thread_resume resume_info;
1038 resume_info.thread = -1;
1039 resume_info.kind = resume_continue;
1040 resume_info.sig = 0;
1041 linux_resume (&resume_info, 1);
1042 }
1043 else
1044 wait_pid = cont_thread;
1045 }
1046
1047 pid = linux_wait_for_event (wait_pid, &w, options);
1048 if (pid == 0) /* only if TARGET_WNOHANG */
1049 return pid;
1050
1051 lwp = get_thread_lwp (current_inferior);
1052
1053 if (must_set_ptrace_flags)
1054 {
1055 ptrace (PTRACE_SETOPTIONS, lwpid_of (lwp), 0, PTRACE_O_TRACECLONE);
1056 must_set_ptrace_flags = 0;
1057 }
1058 /* If we are waiting for a particular child, and it exited,
1059 linux_wait_for_event will return its exit status. Similarly if
1060 the last child exited. If this is not the last child, however,
1061 do not report it as exited until there is a 'thread exited' response
1062 available in the remote protocol. Instead, just wait for another event.
1063 This should be safe, because if the thread crashed we will already
1064 have reported the termination signal to GDB; that should stop any
1065 in-progress stepping operations, etc.
1066
1067 Report the exit status of the last thread to exit. This matches
1068 LinuxThreads' behavior. */
1069
1070 if (all_threads.head == all_threads.tail)
1071 {
1072 if (WIFEXITED (w) || WIFSIGNALED (w))
1073 {
1074 int pid;
1075
1076 pid = pid_of (lwp);
1077
1078 delete_lwp (lwp);
1079 clear_inferiors ();
1080
1081 current_inferior = NULL;
1082
1083 if (WIFEXITED (w))
1084 {
1085 ourstatus->kind = TARGET_WAITKIND_EXITED;
1086 ourstatus->value.integer = WEXITSTATUS (w);
1087
1088 if (debug_threads)
1089 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1090 }
1091 else
1092 {
1093 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1094 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1095
1096 if (debug_threads)
1097 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1098
1099 }
1100
1101 return pid;
1102 }
1103 }
1104 else
1105 {
1106 if (!WIFSTOPPED (w))
1107 goto retry;
1108 }
1109
1110 /* In all-stop, stop all threads. Be careful to only do this if
1111 we're about to report an event to GDB. */
1112 if (!non_stop)
1113 stop_all_lwps ();
1114
1115 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1116
1117 if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
1118 {
1119       /* This thread was requested to stop by GDB with vCont;t, and it
1120 	 stopped cleanly, so report it as stopped with SIG0.  The use of
1121 	 SIGSTOP is an implementation detail.  */
1122 ourstatus->value.sig = TARGET_SIGNAL_0;
1123 }
1124 else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
1125 {
1126       /* This thread was requested to stop by GDB with vCont;t, but it
1127 	 stopped for some other reason.  Set stop_expected so the
1128 pending SIGSTOP is ignored and the LWP is resumed. */
1129 lwp->stop_expected = 1;
1130 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1131 }
1132 else
1133 {
1134 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1135 }
1136
1137 if (debug_threads)
1138 fprintf (stderr, "linux_wait ret = %ld, %d, %d\n",
1139 lwpid_of (lwp),
1140 ourstatus->kind,
1141 ourstatus->value.sig);
1142
1143 return lwpid_of (lwp);
1144 }
1145
1146 /* Get rid of any pending event in the pipe. */
1147 static void
1148 async_file_flush (void)
1149 {
1150 int ret;
1151 char buf;
1152
1153 do
1154 ret = read (linux_event_pipe[0], &buf, 1);
1155 while (ret >= 0 || (ret == -1 && errno == EINTR));
1156 }
1157
1158 /* Put something in the pipe, so the event loop wakes up. */
1159 static void
1160 async_file_mark (void)
1161 {
1162 int ret;
1163
1164 async_file_flush ();
1165
1166 do
1167 ret = write (linux_event_pipe[1], "+", 1);
1168 while (ret == 0 || (ret == -1 && errno == EINTR));
1169
1170 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1171 be awakened anyway. */
1172 }
1173
1174 static unsigned long
1175 linux_wait (struct target_waitstatus *ourstatus, int target_options)
1176 {
1177 unsigned long event_ptid;
1178
1179 if (debug_threads)
1180 fprintf (stderr, "linux_wait\n");
1181
1182 /* Flush the async file first. */
1183 if (target_is_async_p ())
1184 async_file_flush ();
1185
1186 event_ptid = linux_wait_1 (ourstatus, target_options);
1187
1188 /* If at least one stop was reported, there may be more. A single
1189 SIGCHLD can signal more than one child stop. */
1190 if (target_is_async_p ()
1191 && (target_options & TARGET_WNOHANG) != 0
1192 && event_ptid != 0)
1193 async_file_mark ();
1194
1195 return event_ptid;
1196 }
1197
1198 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
1199 thread groups are in use, we need to use tkill. */
1200
1201 static int
1202 kill_lwp (unsigned long lwpid, int signo)
1203 {
1204 static int tkill_failed;
1205
1206 errno = 0;
1207
1208 #ifdef SYS_tkill
1209 if (!tkill_failed)
1210 {
1211 int ret = syscall (SYS_tkill, lwpid, signo);
1212 if (errno != ENOSYS)
1213 return ret;
1214 errno = 0;
1215 tkill_failed = 1;
1216 }
1217 #endif
1218
1219 return kill (lwpid, signo);
1220 }
1221
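/* Request a stop from LWP by sending it a SIGSTOP, unless it is already
   stopped or already has a stop pending.  */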
1222 static void
1223 send_sigstop (struct inferior_list_entry *entry)
1224 {
1225 struct lwp_info *lwp = (struct lwp_info *) entry;
1226 int pid;
1227
1228 if (lwp->stopped)
1229 return;
1230
1231 pid = lwpid_of (lwp);
1232
1233 /* If we already have a pending stop signal for this process, don't
1234 send another. */
1235 if (lwp->stop_expected)
1236 {
1237 if (debug_threads)
1238 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1239
1240 /* We clear the stop_expected flag so that wait_for_sigstop
1241 will receive the SIGSTOP event (instead of silently resuming and
1242 waiting again). It'll be reset below. */
1243 lwp->stop_expected = 0;
1244 return;
1245 }
1246
1247 if (debug_threads)
1248 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1249
1250 kill_lwp (pid, SIGSTOP);
1251 }
1252
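/* Wait until LWP reports the SIGSTOP sent by send_sigstop (or exits).
   Any other interesting signal received in the meantime is saved as a
   pending status so that it can be reported to GDB later.  */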
1253 static void
1254 wait_for_sigstop (struct inferior_list_entry *entry)
1255 {
1256 struct lwp_info *lwp = (struct lwp_info *) entry;
1257 struct thread_info *saved_inferior;
1258 int wstat;
1259 unsigned long saved_tid;
1260 unsigned long ptid;
1261
1262 if (lwp->stopped)
1263 return;
1264
1265 saved_inferior = current_inferior;
1266 if (saved_inferior != NULL)
1267 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1268 else
1269 saved_tid = 0; /* avoid bogus unused warning */
1270
1271 ptid = lwpid_of (lwp);
1272
1273 linux_wait_for_event (ptid, &wstat, __WALL);
1274
1275 /* If we stopped with a non-SIGSTOP signal, save it for later
1276 and record the pending SIGSTOP. If the process exited, just
1277 return. */
1278 if (WIFSTOPPED (wstat)
1279 && WSTOPSIG (wstat) != SIGSTOP)
1280 {
1281 if (debug_threads)
1282 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1283 lwpid_of (lwp), wstat);
1284
1285 /* Do not leave a pending single-step finish to be reported to
1286 the client. The client will give us a new action for this
1287 thread, possibly a continue request --- otherwise, the client
1288 would consider this pending SIGTRAP reported later a spurious
1289 signal. */
1290 if (WSTOPSIG (wstat) == SIGTRAP
1291 && lwp->stepping
1292 && !linux_stopped_by_watchpoint ())
1293 {
1294 if (debug_threads)
1295 fprintf (stderr, " single-step SIGTRAP ignored\n");
1296 }
1297 else
1298 {
1299 lwp->status_pending_p = 1;
1300 lwp->status_pending = wstat;
1301 }
1302 lwp->stop_expected = 1;
1303 }
1304
1305 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
1306 current_inferior = saved_inferior;
1307 else
1308 {
1309 if (debug_threads)
1310 fprintf (stderr, "Previously current thread died.\n");
1311
1312 if (non_stop)
1313 {
1314 /* We can't change the current inferior behind GDB's back,
1315 otherwise, a subsequent command may apply to the wrong
1316 process. */
1317 current_inferior = NULL;
1318 }
1319 else
1320 {
1321 /* Set a valid thread as current. */
1322 set_desired_inferior (0);
1323 }
1324 }
1325 }
1326
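/* Send every LWP a SIGSTOP and wait until all of them have reported the
   stop.  */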
1327 static void
1328 stop_all_lwps (void)
1329 {
1330 stopping_threads = 1;
1331 for_each_inferior (&all_lwps, send_sigstop);
1332 for_each_inferior (&all_lwps, wait_for_sigstop);
1333 stopping_threads = 0;
1334 }
1335
1336 /* Resume execution of the inferior process.
1337 If STEP is nonzero, single-step it.
1338 If SIGNAL is nonzero, give it that signal. */
1339
1340 static void
1341 linux_resume_one_lwp (struct inferior_list_entry *entry,
1342 int step, int signal, siginfo_t *info)
1343 {
1344 struct lwp_info *lwp = (struct lwp_info *) entry;
1345 struct thread_info *saved_inferior;
1346
1347 if (lwp->stopped == 0)
1348 return;
1349
1350 /* If we have pending signals or status, and a new signal, enqueue the
1351 signal. Also enqueue the signal if we are waiting to reinsert a
1352 breakpoint; it will be picked up again below. */
1353 if (signal != 0
1354 && (lwp->status_pending_p || lwp->pending_signals != NULL
1355 || lwp->bp_reinsert != 0))
1356 {
1357 struct pending_signals *p_sig;
1358 p_sig = xmalloc (sizeof (*p_sig));
1359 p_sig->prev = lwp->pending_signals;
1360 p_sig->signal = signal;
1361 if (info == NULL)
1362 memset (&p_sig->info, 0, sizeof (siginfo_t));
1363 else
1364 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1365 lwp->pending_signals = p_sig;
1366 }
1367
1368 if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
1369 return;
1370
1371 saved_inferior = current_inferior;
1372 current_inferior = get_lwp_thread (lwp);
1373
1374 if (debug_threads)
1375 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
1376 lwpid_of (lwp), step ? "step" : "continue", signal,
1377 lwp->stop_expected ? "expected" : "not expected");
1378
1379 /* This bit needs some thinking about. If we get a signal that
1380 we must report while a single-step reinsert is still pending,
1381 we often end up resuming the thread. It might be better to
1382 (ew) allow a stack of pending events; then we could be sure that
1383 the reinsert happened right away and not lose any signals.
1384
1385 Making this stack would also shrink the window in which breakpoints are
1386 uninserted (see comment in linux_wait_for_lwp) but not enough for
1387 complete correctness, so it won't solve that problem. It may be
1388 worthwhile just to solve this one, however. */
1389 if (lwp->bp_reinsert != 0)
1390 {
1391 if (debug_threads)
1392 fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
1393 if (step == 0)
1394 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1395 step = 1;
1396
1397 /* Postpone any pending signal. It was enqueued above. */
1398 signal = 0;
1399 }
1400
1401 check_removed_breakpoint (lwp);
1402
1403 if (debug_threads && the_low_target.get_pc != NULL)
1404 {
1405 fprintf (stderr, " ");
1406 (*the_low_target.get_pc) ();
1407 }
1408
1409 /* If we have pending signals, consume one unless we are trying to reinsert
1410 a breakpoint. */
1411 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
1412 {
1413 struct pending_signals **p_sig;
1414
1415 p_sig = &lwp->pending_signals;
1416 while ((*p_sig)->prev != NULL)
1417 p_sig = &(*p_sig)->prev;
1418
1419 signal = (*p_sig)->signal;
1420 if ((*p_sig)->info.si_signo != 0)
1421 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1422
1423 free (*p_sig);
1424 *p_sig = NULL;
1425 }
1426
1427 regcache_invalidate_one ((struct inferior_list_entry *)
1428 get_lwp_thread (lwp));
1429 errno = 0;
1430 lwp->stopped = 0;
1431 lwp->stepping = step;
1432 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal);
1433
1434 current_inferior = saved_inferior;
1435 if (errno)
1436 {
1437 /* ESRCH from ptrace either means that the thread was already
1438 running (an error) or that it is gone (a race condition). If
1439 it's gone, we will get a notification the next time we wait,
1440 so we can ignore the error. We could differentiate these
1441 two, but it's tricky without waiting; the thread still exists
1442 as a zombie, so sending it signal 0 would succeed. So just
1443 ignore ESRCH. */
1444 if (errno == ESRCH)
1445 return;
1446
1447 perror_with_name ("ptrace");
1448 }
1449 }
1450
1451 struct thread_resume_array
1452 {
1453 struct thread_resume *resume;
1454 size_t n;
1455 };
1456
1457 /* This function is called once per thread. We look up the thread
1458 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1459 resume request.
1460
1461 This algorithm is O(threads * resume elements), but resume elements
1462 is small (and will remain small at least until GDB supports thread
1463 suspension). */
1464 static int
1465 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1466 {
1467 struct lwp_info *lwp;
1468 struct thread_info *thread;
1469 int ndx;
1470 struct thread_resume_array *r;
1471
1472 thread = (struct thread_info *) entry;
1473 lwp = get_thread_lwp (thread);
1474 r = arg;
1475
1476 for (ndx = 0; ndx < r->n; ndx++)
1477 if (r->resume[ndx].thread == -1 || r->resume[ndx].thread == entry->id)
1478 {
1479 lwp->resume = &r->resume[ndx];
1480 return 0;
1481 }
1482
1483 /* No resume action for this thread. */
1484 lwp->resume = NULL;
1485
1486 return 0;
1487 }
1488
1489
1490 /* Set *FLAG_P if this lwp has an interesting status pending. */
1491 static int
1492 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1493 {
1494 struct lwp_info *lwp = (struct lwp_info *) entry;
1495
1496 /* LWPs which will not be resumed are not interesting, because
1497 we might not wait for them next time through linux_wait. */
1498 if (lwp->resume == NULL)
1499 return 0;
1500
1501 /* If this thread has a removed breakpoint, we won't have any
1502 events to report later, so check now. check_removed_breakpoint
1503 may clear status_pending_p. We avoid calling check_removed_breakpoint
1504 for any thread that we are not otherwise going to resume - this
1505 lets us preserve stopped status when two threads hit a breakpoint.
1506 GDB removes the breakpoint to single-step a particular thread
1507 past it, then re-inserts it and resumes all threads. We want
1508 to report the second thread without resuming it in the interim. */
1509 if (lwp->status_pending_p)
1510 check_removed_breakpoint (lwp);
1511
1512 if (lwp->status_pending_p)
1513 * (int *) flag_p = 1;
1514
1515 return 0;
1516 }
1517
1518 /* This function is called once per thread. We check the thread's resume
1519 request, which will tell us whether to resume, step, or leave the thread
1520 stopped; and what signal, if any, it should be sent.
1521
1522 For threads which we aren't explicitly told otherwise, we preserve
1523 the stepping flag; this is used for stepping over gdbserver-placed
1524 breakpoints.
1525
1526 If pending_flags was set in any thread, we queue any needed
1527 signals, since we won't actually resume. We already have a pending
1528 event to report, so we don't need to preserve any step requests;
1529 they should be re-issued if necessary. */
1530
1531 static int
1532 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
1533 {
1534 struct lwp_info *lwp;
1535 struct thread_info *thread;
1536 int step;
1537 int pending_flag = * (int *) arg;
1538
1539 thread = (struct thread_info *) entry;
1540 lwp = get_thread_lwp (thread);
1541
1542 if (lwp->resume == NULL)
1543 return 0;
1544
1545 if (lwp->resume->kind == resume_stop)
1546 {
1547 if (debug_threads)
1548 fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));
1549
1550 if (!lwp->stopped)
1551 {
1552 if (debug_threads)
1553 fprintf (stderr, "running -> suspending %ld\n", lwpid_of (lwp));
1554
1555 lwp->suspended = 1;
1556 send_sigstop (&lwp->head);
1557 }
1558 else
1559 {
1560 if (debug_threads)
1561 {
1562 if (lwp->suspended)
1563 fprintf (stderr, "already stopped/suspended LWP %ld\n",
1564 lwpid_of (lwp));
1565 else
1566 fprintf (stderr, "already stopped/not suspended LWP %ld\n",
1567 lwpid_of (lwp));
1568 }
1569
1570 /* Make sure we leave the LWP suspended, so we don't try to
1571 resume it without GDB telling us to. FIXME: The LWP may
1572 have been stopped in an internal event that was not meant
1573 to be notified back to GDB (e.g., gdbserver breakpoint),
1574 so we should be reporting a stop event in that case
1575 too. */
1576 lwp->suspended = 1;
1577 }
1578
1579 /* For stop requests, we're done. */
1580 lwp->resume = NULL;
1581 return 0;
1582 }
1583 else
1584 lwp->suspended = 0;
1585
1586 /* If this thread which is about to be resumed has a pending status,
1587 then don't resume any threads - we can just report the pending
1588 status. Make sure to queue any signals that would otherwise be
1589 sent. In all-stop mode, we do this decision based on if *any*
1590 thread has a pending status. */
1591 if (non_stop)
1592 resume_status_pending_p (&lwp->head, &pending_flag);
1593
1594 if (!pending_flag)
1595 {
1596 if (debug_threads)
1597 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
1598
1599 if (lwp->resume->thread == -1
1600 && lwp->stepping
1601 && lwp->pending_is_breakpoint)
1602 step = 1;
1603 else
1604 step = (lwp->resume->kind == resume_step);
1605
1606 linux_resume_one_lwp (&lwp->head, step, lwp->resume->sig, NULL);
1607 }
1608 else
1609 {
1610 if (debug_threads)
1611 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
1612
1613 /* If we have a new signal, enqueue the signal. */
1614 if (lwp->resume->sig != 0)
1615 {
1616 struct pending_signals *p_sig;
1617 p_sig = xmalloc (sizeof (*p_sig));
1618 p_sig->prev = lwp->pending_signals;
1619 p_sig->signal = lwp->resume->sig;
1620 memset (&p_sig->info, 0, sizeof (siginfo_t));
1621
1622 /* If this is the same signal we were previously stopped by,
1623 make sure to queue its siginfo. We can ignore the return
1624 value of ptrace; if it fails, we'll skip
1625 PTRACE_SETSIGINFO. */
1626 if (WIFSTOPPED (lwp->last_status)
1627 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
1628 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1629
1630 lwp->pending_signals = p_sig;
1631 }
1632 }
1633
1634 lwp->resume = NULL;
1635 return 0;
1636 }
1637
1638 static void
1639 linux_resume (struct thread_resume *resume_info, size_t n)
1640 {
1641 int pending_flag;
1642 struct thread_resume_array array = { resume_info, n };
1643
1644 find_inferior (&all_threads, linux_set_resume_request, &array);
1645
1646 /* If there is a thread which would otherwise be resumed, which
1647 has a pending status, then don't resume any threads - we can just
1648 report the pending status. Make sure to queue any signals
1649 that would otherwise be sent. In non-stop mode, we'll apply this
1650 logic to each thread individually. */
1651 pending_flag = 0;
1652 if (!non_stop)
1653 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
1654
1655 if (debug_threads)
1656 {
1657 if (pending_flag)
1658 fprintf (stderr, "Not resuming, pending status\n");
1659 else
1660 fprintf (stderr, "Resuming, no pending status\n");
1661 }
1662
1663 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
1664 }
1665
1666 #ifdef HAVE_LINUX_USRREGS
1667
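/* Return the PTRACE_PEEKUSER/POKEUSER offset of register REGNUM, taken
   from the low target's regmap.  */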
1668 int
1669 register_addr (int regnum)
1670 {
1671 int addr;
1672
1673 if (regnum < 0 || regnum >= the_low_target.num_regs)
1674 error ("Invalid register number %d.", regnum);
1675
1676 addr = the_low_target.regmap[regnum];
1677
1678 return addr;
1679 }
1680
1681 /* Fetch one register. */
1682 static void
1683 fetch_register (int regno)
1684 {
1685 CORE_ADDR regaddr;
1686 int i, size;
1687 char *buf;
1688
1689 if (regno >= the_low_target.num_regs)
1690 return;
1691 if ((*the_low_target.cannot_fetch_register) (regno))
1692 return;
1693
1694 regaddr = register_addr (regno);
1695 if (regaddr == -1)
1696 return;
1697 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1698 & - sizeof (PTRACE_XFER_TYPE));
1699 buf = alloca (size);
1700 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1701 {
1702 errno = 0;
1703 *(PTRACE_XFER_TYPE *) (buf + i) =
1704 ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1705 regaddr += sizeof (PTRACE_XFER_TYPE);
1706 if (errno != 0)
1707 {
1708 /* Warning, not error, in case we are attached; sometimes the
1709 kernel doesn't let us at the registers. */
1710 char *err = strerror (errno);
1711 char *msg = alloca (strlen (err) + 128);
1712 sprintf (msg, "reading register %d: %s", regno, err);
1713 error (msg);
1714 goto error_exit;
1715 }
1716 }
1717
1718 if (the_low_target.supply_ptrace_register)
1719 the_low_target.supply_ptrace_register (regno, buf);
1720 else
1721 supply_register (regno, buf);
1722
1723 error_exit:;
1724 }
1725
1726 /* Fetch all registers, or just one, from the child process. */
1727 static void
1728 usr_fetch_inferior_registers (int regno)
1729 {
1730 if (regno == -1 || regno == 0)
1731 for (regno = 0; regno < the_low_target.num_regs; regno++)
1732 fetch_register (regno);
1733 else
1734 fetch_register (regno);
1735 }
1736
1737 /* Store our register values back into the inferior.
1738 If REGNO is -1, do this for all registers.
1739 Otherwise, REGNO specifies which register (so we can save time). */
1740 static void
1741 usr_store_inferior_registers (int regno)
1742 {
1743 CORE_ADDR regaddr;
1744 int i, size;
1745 char *buf;
1746
1747 if (regno >= 0)
1748 {
1749 if (regno >= the_low_target.num_regs)
1750 return;
1751
1752 if ((*the_low_target.cannot_store_register) (regno) == 1)
1753 return;
1754
1755 regaddr = register_addr (regno);
1756 if (regaddr == -1)
1757 return;
1758 errno = 0;
1759 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1760 & - sizeof (PTRACE_XFER_TYPE);
1761 buf = alloca (size);
1762 memset (buf, 0, size);
1763
1764 if (the_low_target.collect_ptrace_register)
1765 the_low_target.collect_ptrace_register (regno, buf);
1766 else
1767 collect_register (regno, buf);
1768
1769 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1770 {
1771 errno = 0;
1772 ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
1773 *(PTRACE_XFER_TYPE *) (buf + i));
1774 if (errno != 0)
1775 {
1776 /* At this point, ESRCH should mean the process is
1777 already gone, in which case we simply ignore attempts
1778 to change its registers. See also the related
1779 comment in linux_resume_one_lwp. */
1780 if (errno == ESRCH)
1781 return;
1782
1783 if ((*the_low_target.cannot_store_register) (regno) == 0)
1784 {
1785 char *err = strerror (errno);
1786 char *msg = alloca (strlen (err) + 128);
1787 sprintf (msg, "writing register %d: %s",
1788 regno, err);
1789 error (msg);
1790 return;
1791 }
1792 }
1793 regaddr += sizeof (PTRACE_XFER_TYPE);
1794 }
1795 }
1796 else
1797 for (regno = 0; regno < the_low_target.num_regs; regno++)
1798 usr_store_inferior_registers (regno);
1799 }
1800 #endif /* HAVE_LINUX_USRREGS */
1801
1802
1803
1804 #ifdef HAVE_LINUX_REGSETS
1805
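/* Fetch all registers covered by regsets (PTRACE_GETREGS and friends)
   into the regcache.  Returns 0 if the general-purpose registers were
   obtained this way, nonzero if the caller should fall back to the
   PTRACE_PEEKUSER method.  */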
1806 static int
1807 regsets_fetch_inferior_registers ()
1808 {
1809 struct regset_info *regset;
1810 int saw_general_regs = 0;
1811
1812 regset = target_regsets;
1813
1814 while (regset->size >= 0)
1815 {
1816 void *buf;
1817 int res;
1818
1819 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1820 {
1821 regset ++;
1822 continue;
1823 }
1824
1825 buf = xmalloc (regset->size);
1826 #ifndef __sparc__
1827 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1828 #else
1829 res = ptrace (regset->get_request, inferior_pid, buf, 0);
1830 #endif
1831 if (res < 0)
1832 {
1833 if (errno == EIO)
1834 {
1835 /* If we get EIO on a regset, do not try it again for
1836 this process. */
1837 disabled_regsets[regset - target_regsets] = 1;
1838 continue;
1839 }
1840 else
1841 {
1842 char s[256];
1843 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
1844 inferior_pid);
1845 perror (s);
1846 }
1847 }
1848 else if (regset->type == GENERAL_REGS)
1849 saw_general_regs = 1;
1850 regset->store_function (buf);
1851 regset ++;
1852 }
1853 if (saw_general_regs)
1854 return 0;
1855 else
1856 return 1;
1857 }
1858
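/* Write all regset-covered registers from the regcache back to the
   inferior.  As above, returns 0 if the general-purpose registers were
   handled, nonzero otherwise.  */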
1859 static int
1860 regsets_store_inferior_registers ()
1861 {
1862 struct regset_info *regset;
1863 int saw_general_regs = 0;
1864
1865 regset = target_regsets;
1866
1867 while (regset->size >= 0)
1868 {
1869 void *buf;
1870 int res;
1871
1872 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1873 {
1874 regset ++;
1875 continue;
1876 }
1877
1878 buf = xmalloc (regset->size);
1879
1880 /* First fill the buffer with the current register set contents,
1881 in case there are any items in the kernel's regset that are
1882 not in gdbserver's regcache. */
1883 #ifndef __sparc__
1884 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1885 #else
1886 res = ptrace (regset->get_request, inferior_pid, buf, 0);
1887 #endif
1888
1889 if (res == 0)
1890 {
1891 /* Then overlay our cached registers on that. */
1892 regset->fill_function (buf);
1893
1894 /* Only now do we write the register set. */
1895 #ifndef __sparc__
1896 res = ptrace (regset->set_request, inferior_pid, 0, buf);
1897 #else
1898 res = ptrace (regset->set_request, inferior_pid, buf, 0);
1899 #endif
1900 }
1901
1902 if (res < 0)
1903 {
1904 if (errno == EIO)
1905 {
1906 /* If we get EIO on a regset, do not try it again for
1907 this process. */
1908 disabled_regsets[regset - target_regsets] = 1;
free (buf);
1909 continue;
1910 }
1911 else if (errno == ESRCH)
1912 {
1913 /* At this point, ESRCH should mean the process is
1914 already gone, in which case we simply ignore attempts
1915 to change its registers. See also the related
1916 comment in linux_resume_one_lwp. */
free (buf);
1917 return 0;
1918 }
1919 else
1920 {
1921 perror ("Warning: ptrace(regsets_store_inferior_registers)");
1922 }
1923 }
1924 else if (regset->type == GENERAL_REGS)
1925 saw_general_regs = 1;
1926 regset ++;
1927 free (buf);
1928 }
1929 if (saw_general_regs)
1930 return 0;
1931 else
1932 return 1;
1934 }
1935
1936 #endif /* HAVE_LINUX_REGSETS */
1937
1938
1939 void
1940 linux_fetch_registers (int regno)
1941 {
1942 #ifdef HAVE_LINUX_REGSETS
1943 if (regsets_fetch_inferior_registers () == 0)
1944 return;
1945 #endif
1946 #ifdef HAVE_LINUX_USRREGS
1947 usr_fetch_inferior_registers (regno);
1948 #endif
1949 }
1950
1951 void
1952 linux_store_registers (int regno)
1953 {
1954 #ifdef HAVE_LINUX_REGSETS
1955 if (regsets_store_inferior_registers () == 0)
1956 return;
1957 #endif
1958 #ifdef HAVE_LINUX_USRREGS
1959 usr_store_inferior_registers (regno);
1960 #endif
1961 }
1962
1963
1964 /* Copy LEN bytes from inferior's memory starting at MEMADDR
1965 to debugger memory starting at MYADDR. */
1966
1967 static int
1968 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
1969 {
1970 register int i;
1971 /* Round starting address down to longword boundary. */
1972 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1973 /* Round ending address up; get number of longwords that makes. */
1974 register int count
1975 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
1976 / sizeof (PTRACE_XFER_TYPE);
1977 /* Allocate buffer of that many longwords. */
1978 register PTRACE_XFER_TYPE *buffer
1979 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1980 int fd;
1981 char filename[64];
1982
1983 /* Try using /proc. Don't bother for short reads of less than three words. */
1984 if (len >= 3 * sizeof (long))
1985 {
1986 /* We could keep this file open and cache it - possibly one per
1987 thread. That requires some juggling, but is even faster. */
1988 sprintf (filename, "/proc/%ld/mem", inferior_pid);
1989 fd = open (filename, O_RDONLY | O_LARGEFILE);
1990 if (fd == -1)
1991 goto no_proc;
1992
1993 /* If pread64 is available, use it. It's faster if the kernel
1994 supports it (only one syscall), and it's 64-bit safe even on
1995 32-bit platforms (for instance, SPARC debugging a SPARC64
1996 application). */
1997 #ifdef HAVE_PREAD64
1998 if (pread64 (fd, myaddr, len, memaddr) != len)
1999 #else
2000 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2001 #endif
2002 {
2003 close (fd);
2004 goto no_proc;
2005 }
2006
2007 close (fd);
2008 return 0;
2009 }
2010
2011 no_proc:
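/* Either /proc was unusable or the transfer was too small to bother; fall back to PTRACE_PEEKTEXT, reading whole aligned words into a local buffer and copying out only the bytes requested. */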
2012 /* Read all the longwords */
2013 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2014 {
2015 errno = 0;
2016 buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
2017 (PTRACE_ARG3_TYPE) addr, 0);
2018 if (errno)
2019 return errno;
2020 }
2021
2022 /* Copy appropriate bytes out of the buffer. */
2023 memcpy (myaddr,
2024 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
2025 len);
2026
2027 return 0;
2028 }
2029
2030 /* Copy LEN bytes of data from debugger memory at MYADDR
2031 to inferior's memory at MEMADDR.
2032 On failure (cannot write the inferior)
2033 returns the value of errno. */
2034
2035 static int
2036 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2037 {
2038 register int i;
2039 /* Round starting address down to longword boundary. */
2040 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2041 /* Round ending address up; get number of longwords that makes. */
2042 register int count
2043 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2044 /* Allocate buffer of that many longwords. */
2045 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2046
2047 if (debug_threads)
2048 {
2049 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
2050 }
2051
2052 /* Fill start and end extra bytes of buffer with existing memory data. */
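/* Only the first and last words can contain bytes outside the requested range; everything in between is overwritten wholesale below. */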
2053
2054 buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
2055 (PTRACE_ARG3_TYPE) addr, 0);
2056
2057 if (count > 1)
2058 {
2059 buffer[count - 1]
2060 = ptrace (PTRACE_PEEKTEXT, inferior_pid,
2061 (PTRACE_ARG3_TYPE) (addr + (count - 1)
2062 * sizeof (PTRACE_XFER_TYPE)),
2063 0);
2064 }
2065
2066 /* Copy data to be written over corresponding part of buffer */
2067
2068 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2069
2070 /* Write the entire buffer. */
2071
2072 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2073 {
2074 errno = 0;
2075 ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
2076 if (errno)
2077 return errno;
2078 }
2079
2080 return 0;
2081 }
2082
2083 static int linux_supports_tracefork_flag;
2084
2085 /* Helper functions for linux_test_for_tracefork, called via clone (). */
2086
2087 static int
2088 linux_tracefork_grandchild (void *arg)
2089 {
2090 _exit (0);
2091 }
2092
2093 #define STACK_SIZE 4096
2094
2095 static int
2096 linux_tracefork_child (void *arg)
2097 {
2098 ptrace (PTRACE_TRACEME, 0, 0, 0);
2099 kill (getpid (), SIGSTOP);
2100 #ifdef __ia64__
2101 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
2102 CLONE_VM | SIGCHLD, NULL);
2103 #else
2104 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
2105 CLONE_VM | SIGCHLD, NULL);
2106 #endif
2107 _exit (0);
2108 }
2109
2110 /* Wrapper function for waitpid which handles EINTR, and emulates
2111 __WALL for systems where that is not available. */
2112
2113 static int
2114 my_waitpid (int pid, int *status, int flags)
2115 {
2116 int ret, out_errno;
2117
2118 if (debug_threads)
2119 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
2120
2121 if (flags & __WALL)
2122 {
2123 sigset_t block_mask, org_mask, wake_mask;
2124 int wnohang;
2125
2126 wnohang = (flags & WNOHANG) != 0;
2127 flags &= ~(__WALL | __WCLONE);
2128 flags |= WNOHANG;
2129
2130 /* Block all signals while here. This avoids knowing about
2131 LinuxThread's signals. */
2132 sigfillset (&block_mask);
2133 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
2134
2135 /* ... except during the sigsuspend below. */
2136 sigemptyset (&wake_mask);
2137
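/* Emulate __WALL by polling with WNOHANG, alternating between non-cloned and cloned (__WCLONE) children. Once both flavors have been tried without a hit, either bail out (if the caller passed WNOHANG) or sleep in sigsuspend until a signal, normally SIGCHLD, arrives. */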
2138 while (1)
2139 {
2140 /* Since all signals are blocked, there's no need to check
2141 for EINTR here. */
2142 ret = waitpid (pid, status, flags);
2143 out_errno = errno;
2144
2145 if (ret == -1 && out_errno != ECHILD)
2146 break;
2147 else if (ret > 0)
2148 break;
2149
2150 if (flags & __WCLONE)
2151 {
2152 /* We've tried both flavors now. If WNOHANG is set,
2153 there's nothing else to do, just bail out. */
2154 if (wnohang)
2155 break;
2156
2157 if (debug_threads)
2158 fprintf (stderr, "blocking\n");
2159
2160 /* Block waiting for signals. */
2161 sigsuspend (&wake_mask);
2162 }
2163
2164 flags ^= __WCLONE;
2165 }
2166
2167 sigprocmask (SIG_SETMASK, &org_mask, NULL);
2168 }
2169 else
2170 {
2171 do
2172 ret = waitpid (pid, status, flags);
2173 while (ret == -1 && errno == EINTR);
2174 out_errno = errno;
2175 }
2176
2177 if (debug_threads)
2178 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
2179 pid, flags, status ? *status : -1, ret);
2180
2181 errno = out_errno;
2182 return ret;
2183 }
2184
2185 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
2186 sure that we can enable the option, and that it had the desired
2187 effect. */
2188
2189 static void
2190 linux_test_for_tracefork (void)
2191 {
2192 int child_pid, ret, status;
2193 long second_pid;
2194 char *stack = xmalloc (STACK_SIZE * 4);
2195
2196 linux_supports_tracefork_flag = 0;
2197
2198 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
2199 #ifdef __ia64__
2200 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
2201 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2202 #else
2203 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
2204 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2205 #endif
2206 if (child_pid == -1)
2207 perror_with_name ("clone");
2208
2209 ret = my_waitpid (child_pid, &status, 0);
2210 if (ret == -1)
2211 perror_with_name ("waitpid");
2212 else if (ret != child_pid)
2213 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
2214 if (! WIFSTOPPED (status))
2215 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
2216
2217 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
2218 if (ret != 0)
2219 {
2220 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2221 if (ret != 0)
2222 {
2223 warning ("linux_test_for_tracefork: failed to kill child");
2224 return;
2225 }
2226
2227 ret = my_waitpid (child_pid, &status, 0);
2228 if (ret != child_pid)
2229 warning ("linux_test_for_tracefork: failed to wait for killed child");
2230 else if (!WIFSIGNALED (status))
2231 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
2232 "killed child", status);
2233
2234 return;
2235 }
2236
2237 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
2238 if (ret != 0)
2239 warning ("linux_test_for_tracefork: failed to resume child");
2240
2241 ret = my_waitpid (child_pid, &status, 0);
2242
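/* If the kernel honored PTRACE_O_TRACEFORK, the child's clone shows up as a PTRACE_EVENT_FORK stop and PTRACE_GETEVENTMSG yields the grandchild's PID. The grandchild starts out traced and stopped, so reap its stop before killing it. */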
2243 if (ret == child_pid && WIFSTOPPED (status)
2244 && status >> 16 == PTRACE_EVENT_FORK)
2245 {
2246 second_pid = 0;
2247 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
2248 if (ret == 0 && second_pid != 0)
2249 {
2250 int second_status;
2251
2252 linux_supports_tracefork_flag = 1;
2253 my_waitpid (second_pid, &second_status, 0);
2254 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
2255 if (ret != 0)
2256 warning ("linux_test_for_tracefork: failed to kill second child");
2257 my_waitpid (second_pid, &status, 0);
2258 }
2259 }
2260 else
2261 warning ("linux_test_for_tracefork: unexpected result from waitpid "
2262 "(%d, status 0x%x)", ret, status);
2263
2264 do
2265 {
2266 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2267 if (ret != 0)
2268 warning ("linux_test_for_tracefork: failed to kill child");
2269 my_waitpid (child_pid, &status, 0);
2270 }
2271 while (WIFSTOPPED (status));
2272
2273 free (stack);
2274 }
2275
2276
2277 static void
2278 linux_look_up_symbols (void)
2279 {
2280 #ifdef USE_THREAD_DB
2281 if (thread_db_active)
2282 return;
2283
2284 thread_db_active = thread_db_init (!linux_supports_tracefork_flag);
2285 #endif
2286 }
2287
2288 static void
2289 linux_request_interrupt (void)
2290 {
2291 extern unsigned long signal_pid;
2292
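/* Deliver SIGINT to the thread GDB has selected for resumption, if any (cont_thread); otherwise interrupt the originally created or attached process (signal_pid). */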
2293 if (cont_thread != 0 && cont_thread != -1)
2294 {
2295 struct lwp_info *lwp;
2296 int lwpid;
2297
2298 lwp = get_thread_lwp (current_inferior);
2299 lwpid = lwpid_of (lwp);
2300 kill_lwp (lwpid, SIGINT);
2301 }
2302 else
2303 kill_lwp (signal_pid, SIGINT);
2304 }
2305
2306 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2307 to debugger memory starting at MYADDR. */
2308
2309 static int
2310 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2311 {
2312 char filename[PATH_MAX];
2313 int fd, n;
2314
2315 snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);
2316
2317 fd = open (filename, O_RDONLY);
2318 if (fd < 0)
2319 return -1;
2320
2321 if (offset != (CORE_ADDR) 0
2322 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2323 n = -1;
2324 else
2325 n = read (fd, myaddr, len);
2326
2327 close (fd);
2328
2329 return n;
2330 }
2331
2332 /* These watchpoint-related wrapper functions simply pass the call on
2333 if the target has registered a corresponding function. */
2334
2335 static int
2336 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
2337 {
2338 if (the_low_target.insert_watchpoint != NULL)
2339 return the_low_target.insert_watchpoint (type, addr, len);
2340 else
2341 /* Unsupported (see target.h). */
2342 return 1;
2343 }
2344
2345 static int
2346 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
2347 {
2348 if (the_low_target.remove_watchpoint != NULL)
2349 return the_low_target.remove_watchpoint (type, addr, len);
2350 else
2351 /* Unsupported (see target.h). */
2352 return 1;
2353 }
2354
2355 static int
2356 linux_stopped_by_watchpoint (void)
2357 {
2358 if (the_low_target.stopped_by_watchpoint != NULL)
2359 return the_low_target.stopped_by_watchpoint ();
2360 else
2361 return 0;
2362 }
2363
2364 static CORE_ADDR
2365 linux_stopped_data_address (void)
2366 {
2367 if (the_low_target.stopped_data_address != NULL)
2368 return the_low_target.stopped_data_address ();
2369 else
2370 return 0;
2371 }
2372
2373 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2374 #if defined(__mcoldfire__)
2375 /* These should really be defined in the kernel's ptrace.h header. */
2376 #define PT_TEXT_ADDR 49*4
2377 #define PT_DATA_ADDR 50*4
2378 #define PT_TEXT_END_ADDR 51*4
2379 #endif
2380
2381 /* Under uClinux, programs are loaded at non-zero offsets, which we need
2382 to tell gdb about. */
2383
2384 static int
2385 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2386 {
2387 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2388 unsigned long text, text_end, data;
2389 int pid = lwpid_of (get_thread_lwp (current_inferior));
2390
2391 errno = 0;
2392
2393 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2394 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2395 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2396
2397 if (errno == 0)
2398 {
2399 /* Both text and data offsets produced at compile-time (and so
2400 used by gdb) are relative to the beginning of the program,
2401 with the data segment immediately following the text segment.
2402 However, the actual runtime layout in memory may put the data
2403 somewhere else, so when we send gdb a data base-address, we
2404 use the real data base address and subtract the compile-time
2405 data base-address from it (which is just the length of the
2406 text segment). BSS immediately follows data in both
2407 cases. */
2408 *text_p = text;
2409 *data_p = data - (text_end - text);
2410
2411 return 1;
2412 }
2413 #endif
2414 return 0;
2415 }
2416 #endif
2417
2418 static int
2419 linux_qxfer_osdata (const char *annex,
2420 unsigned char *readbuf, unsigned const char *writebuf,
2421 CORE_ADDR offset, int len)
2422 {
2423 /* We take a snapshot of the process list when the object starts to be
2424 read. */
2425 static const char *buf;
2426 static long len_avail = -1;
2427 static struct buffer buffer;
2428
2429 DIR *dirp;
2430
2431 if (strcmp (annex, "processes") != 0)
2432 return 0;
2433
2434 if (!readbuf || writebuf)
2435 return 0;
2436
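/* qXfer:osdata reads arrive in chunks: offset 0 (re)builds an XML snapshot of /proc, later offsets read from the cached buffer, and a read at or past the end releases it. */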
2437 if (offset == 0)
2438 {
2439 if (len_avail != -1 && len_avail != 0)
2440 buffer_free (&buffer);
2441 len_avail = 0;
2442 buf = NULL;
2443 buffer_init (&buffer);
2444 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
2445
2446 dirp = opendir ("/proc");
2447 if (dirp)
2448 {
2449 struct dirent *dp;
2450 while ((dp = readdir (dirp)) != NULL)
2451 {
2452 struct stat statbuf;
2453 char procentry[sizeof ("/proc/4294967295")];
2454
2455 if (!isdigit (dp->d_name[0])
2456 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
2457 continue;
2458
2459 sprintf (procentry, "/proc/%s", dp->d_name);
2460 if (stat (procentry, &statbuf) == 0
2461 && S_ISDIR (statbuf.st_mode))
2462 {
2463 char pathname[128];
2464 FILE *f;
2465 char cmd[MAXPATHLEN + 1];
2466 struct passwd *entry;
2467
2468 sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
2469 entry = getpwuid (statbuf.st_uid);
2470
2471 if ((f = fopen (pathname, "r")) != NULL)
2472 {
2473 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2474 if (len > 0)
2475 {
2476 int i;
2477 for (i = 0; i < len; i++)
2478 if (cmd[i] == '\0')
2479 cmd[i] = ' ';
2480 cmd[len] = '\0';
2481
2482 buffer_xml_printf (
2483 &buffer,
2484 "<item>"
2485 "<column name=\"pid\">%s</column>"
2486 "<column name=\"user\">%s</column>"
2487 "<column name=\"command\">%s</column>"
2488 "</item>",
2489 dp->d_name,
2490 entry ? entry->pw_name : "?",
2491 cmd);
2492 }
2493 fclose (f);
2494 }
2495 }
2496 }
2497
2498 closedir (dirp);
2499 }
2500 buffer_grow_str0 (&buffer, "</osdata>\n");
2501 buf = buffer_finish (&buffer);
2502 len_avail = strlen (buf);
2503 }
2504
2505 if (offset >= len_avail)
2506 {
2507 /* Done. Get rid of the data. */
2508 buffer_free (&buffer);
2509 buf = NULL;
2510 len_avail = 0;
2511 return 0;
2512 }
2513
2514 if (len > len_avail - offset)
2515 len = len_avail - offset;
2516 memcpy (readbuf, buf + offset, len);
2517
2518 return len;
2519 }
2520
2521 static int
2522 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
2523 unsigned const char *writebuf, CORE_ADDR offset, int len)
2524 {
2525 struct siginfo siginfo;
2526 long pid = -1;
2527
2528 if (current_inferior == NULL)
2529 return -1;
2530
2531 pid = lwpid_of (get_thread_lwp (current_inferior));
2532
2533 if (debug_threads)
2534 fprintf (stderr, "%s siginfo for lwp %ld.\n",
2535 readbuf != NULL ? "Reading" : "Writing",
2536 pid);
2537
2538 if (offset > sizeof (siginfo))
2539 return -1;
2540
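/* Fetch the current siginfo first, so that a partial write only alters the requested bytes before the whole structure is written back. */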
2541 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
2542 return -1;
2543
2544 if (offset + len > sizeof (siginfo))
2545 len = sizeof (siginfo) - offset;
2546
2547 if (readbuf != NULL)
2548 memcpy (readbuf, (char *) &siginfo + offset, len);
2549 else
2550 {
2551 memcpy ((char *) &siginfo + offset, writebuf, len);
2552 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
2553 return -1;
2554 }
2555
2556 return len;
2557 }
2558
2559 /* SIGCHLD handler that serves two purposes: in non-stop/async mode it
2560 lets us notice when children change state, and it also acts as the
2561 handler for the sigsuspend in my_waitpid. */
2562
2563 static void
2564 sigchld_handler (int signo)
2565 {
2566 int old_errno = errno;
2567
2568 if (debug_threads)
2569 /* fprintf is not async-signal-safe, so call write directly. */
2570 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
2571
2572 if (target_is_async_p ())
2573 async_file_mark (); /* trigger a linux_wait */
2574
2575 errno = old_errno;
2576 }
2577
2578 static int
2579 linux_supports_non_stop (void)
2580 {
2581 return 1;
2582 }
2583
2584 static int
2585 linux_async (int enable)
2586 {
2587 int previous = (linux_event_pipe[0] != -1);
2588
2589 if (previous != enable)
2590 {
2591 sigset_t mask;
2592 sigemptyset (&mask);
2593 sigaddset (&mask, SIGCHLD);
2594
2595 sigprocmask (SIG_BLOCK, &mask, NULL);
2596
2597 if (enable)
2598 {
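/* The event pipe is a self-pipe for the event loop: async_file_mark (presumably a write to linux_event_pipe[1]) wakes the loop, which then calls handle_target_event. Both ends are made non-blocking so marking never stalls a signal handler. */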
2599 if (pipe (linux_event_pipe) == -1)
2600 fatal ("creating event pipe failed.");
2601
2602 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
2603 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
2604
2605 /* Register the event loop handler. */
2606 add_file_handler (linux_event_pipe[0],
2607 handle_target_event, NULL);
2608
2609 /* Always trigger a linux_wait. */
2610 async_file_mark ();
2611 }
2612 else
2613 {
2614 delete_file_handler (linux_event_pipe[0]);
2615
2616 close (linux_event_pipe[0]);
2617 close (linux_event_pipe[1]);
2618 linux_event_pipe[0] = -1;
2619 linux_event_pipe[1] = -1;
2620 }
2621
2622 sigprocmask (SIG_UNBLOCK, &mask, NULL);
2623 }
2624
2625 return previous;
2626 }
2627
2628 static int
2629 linux_start_non_stop (int nonstop)
2630 {
2631 /* Register or unregister from event-loop accordingly. */
2632 linux_async (nonstop);
2633 return 0;
2634 }
2635
2636 static struct target_ops linux_target_ops = {
2637 linux_create_inferior,
2638 linux_attach,
2639 linux_kill,
2640 linux_detach,
2641 linux_join,
2642 linux_thread_alive,
2643 linux_resume,
2644 linux_wait,
2645 linux_fetch_registers,
2646 linux_store_registers,
2647 linux_read_memory,
2648 linux_write_memory,
2649 linux_look_up_symbols,
2650 linux_request_interrupt,
2651 linux_read_auxv,
2652 linux_insert_watchpoint,
2653 linux_remove_watchpoint,
2654 linux_stopped_by_watchpoint,
2655 linux_stopped_data_address,
2656 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2657 linux_read_offsets,
2658 #else
2659 NULL,
2660 #endif
2661 #ifdef USE_THREAD_DB
2662 thread_db_get_tls_address,
2663 #else
2664 NULL,
2665 #endif
2666 NULL,
2667 hostio_last_error_from_errno,
2668 linux_qxfer_osdata,
2669 linux_xfer_siginfo,
2670 linux_supports_non_stop,
2671 linux_async,
2672 linux_start_non_stop,
2673 };
2674
2675 static void
2676 linux_init_signals ()
2677 {
2678 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
2679 to find what the cancel signal actually is. */
2680 signal (__SIGRTMIN+1, SIG_IGN);
2681 }
2682
2683 void
2684 initialize_low (void)
2685 {
2686 struct sigaction sigchld_action;
2687 memset (&sigchld_action, 0, sizeof (sigchld_action));
2688 thread_db_active = 0;
2689 set_target_ops (&linux_target_ops);
2690 set_breakpoint_data (the_low_target.breakpoint,
2691 the_low_target.breakpoint_len);
2692 linux_init_signals ();
2693 linux_test_for_tracefork ();
2694 #ifdef HAVE_LINUX_REGSETS
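/* Count the target regsets (the array ends with an entry whose size is negative) and allocate one "disabled" flag per regset. */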
2695 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
2696 ;
2697 disabled_regsets = xmalloc (num_regsets);
2698 #endif
2699
2700 sigchld_action.sa_handler = sigchld_handler;
2701 sigemptyset (&sigchld_action.sa_mask);
2702 sigchld_action.sa_flags = SA_RESTART;
2703 sigaction (SIGCHLD, &sigchld_action, NULL);
2704 }