/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
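
/* W_STOPCODE builds the same status encoding the kernel uses for a
   stopped child, so the standard <sys/wait.h> macros can take it
   apart again.  A sketch of the round trip, assuming the usual
   encoding: given

     int status = W_STOPCODE (SIGSTOP);

   WIFSTOPPED (status) is then nonzero and WSTOPSIG (status) yields
   SIGSTOP.  This is relied on below when synthesizing a stop status
   for a new clone (see handle_extended_wait).  */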

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;
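
/* For instance, an LWP created by a clone event in a process whose
   thread group ID is TGID is entered into these lists under
   ptid_build (TGID, LWPID, 0): the pid field carries the tgid and the
   lwp field the kernel LWP ID (see handle_extended_wait below).  */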

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd; the caller must free it.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
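
/* A minimal usage sketch (PID is illustrative); whichever buffer is
   returned, the caller owns it:

     char *path = linux_child_pid_to_exec_file (pid);
     ... use path ...
     free (path);  */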

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
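
/* Callers can combine this with the /proc symlink above; a sketch,
   with illustrative names only:

     char *file = linux_child_pid_to_exec_file (pid);
     int is_64 = elf_64_file_p (file);
     free (file);

   where IS_64 ends up 1 for a 64-bit ELF, 0 for any other readable
   file, and -1 if the file could not be opened.  */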

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
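
/* A typical call does not care whether the child is a plain fork
   child or a clone, and simply passes __WALL, e.g.:

     int status;
     int ret = my_waitpid (lwpid, &status, __WALL);

   On systems where waitpid does not honor __WALL itself, the loop
   above emulates it by retrying with the flags toggled between the
   default wait set and __WCLONE.  */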

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
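
/* Note the extended event only arrives if event reporting
   (PTRACE_O_TRACECLONE) has been enabled on the parent via
   linux_enable_event_reporting.  The kernel then reports the clone
   as a SIGTRAP stop whose high status bits carry the event, which is
   what linux_wait_for_event_1 checks with `*wstat >> 16 != 0' before
   calling here.  */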

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
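
/* Concretely: on x86 the breakpoint instruction (int3) is one byte
   long and the kernel reports the PC just past it, so
   the_low_target.decr_pc_after_break is 1 there, and a continue that
   stops at, say, 0x8048457 is adjusted back to the breakpoint address
   0x8048456 (addresses illustrative).  Targets that report the
   breakpoint address directly set decr_pc_after_break to 0, making
   the subtraction above a no-op.  */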

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            /* Errors ignored.  */;
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things apparently work
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}
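
/* find_inferior returns the first entry for which the callback
   returns nonzero, so second_thread_of_pid_p makes it yield the
   second thread of PID if one exists and NULL otherwise; a NULL
   result therefore means THREAD is the only remaining thread of its
   process.  */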

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        /* LWP is NULL here, so don't dereference it for the message.  */
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n", pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS if waitpid actually set it.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
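
/* Both ptid forms are accepted here; with illustrative values:

     find_lwp_pid (pid_to_ptid (1234));           matches lwp 1234
     find_lwp_pid (ptid_build (1234, 1235, 0));   matches lwp 1235

   since same_lwp falls back to the pid field when the lwp field of
   the argument is zero.  */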

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, the way to know which watched address trapped is to
     check the register that is used to select which address to watch.
     The problem is, between setting the watchpoint and reading back
     which data address trapped, the user may change the set of
     watchpoints, and, as a consequence, GDB changes the debug
     registers in the inferior.  To avoid reading back a stale
     stopped-data-address when that happens, we cache in LP the fact
     that a watchpoint trapped, and the corresponding data address, as
     soon as we see CHILD stop with a SIGTRAP.  If GDB changes the
     debug registers meanwhile, we have the cached data we can rely
     on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass LWP->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}
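
/* Note the queue discipline: enqueue_one_deferred_signal pushes new
   entries at the head of the PREV-linked list, while the dequeue
   loop above walks to the entry whose PREV is NULL, i.e. the oldest
   one.  Deferred signals are therefore re-reported in FIFO order,
   the order in which the LWP originally received them.  */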

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child
            = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}


/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
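
/* The random pick above maps rand () into [0, NUM_EVENTS) without
   modulo bias:

     random_selector = (int)
       ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

   select_event_lwp_callback then decrements the selector once per
   qualifying LWP and selects the one that brings it to zero.  */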

/* Decrement the suspend count of an LWP.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended--;

  gdb_assert (lwp->suspended >= 0);
  return 0;
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if
   non-NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}

static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
                                       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
                            struct target_waitstatus *ourstatus,
                            int target_options);

1965 /* Stabilize threads (move them out of jump pads).
1966
1967 If a thread is midway through collecting a fast tracepoint, we
1968 need to finish the collection and move it out of the jump pad
1969 before reporting the signal.
1970
1971 This avoids recursion while collecting (when a signal arrives
1972 midway, and the signal handler itself collects), which would trash
1973 the trace buffer.  In case the user set a breakpoint in a signal
1974 handler, this also avoids the backtrace showing the jump pad, etc.
1975 Most importantly, there are certain things we can't do safely if
1976 threads are stopped in a jump pad (or in its callees).  For
1977 example:
1978
1979 - starting a new trace run.  A thread still collecting the
1980 previous run could trash the trace buffer when resumed.  The trace
1981 buffer control structures would have been reset but the thread has
1982 no way to tell.  The thread could even be midway through a memcpy
1983 to the buffer, which would mean that when resumed, it would
1984 clobber the trace buffer that had been set up for the new run.
1985
1986 - we can't rewrite/reuse the jump pads for new tracepoints
1987 safely.  Say you do tstart while a thread is stopped midway
1988 through collecting.  When the thread is later resumed, it finishes
1989 the collection, and returns to the jump pad, to execute the
1990 original instruction that was under the tracepoint jump at the
1991 time the older run was started.  If the jump pad has since been
1992 rewritten for something else in the new run, the thread would now
1993 execute the wrong/random instructions.  */
1994
1995 static void
1996 linux_stabilize_threads (void)
1997 {
1998 struct thread_info *save_inferior;
1999 struct lwp_info *lwp_stuck;
2000
2001 lwp_stuck
2002 = (struct lwp_info *) find_inferior (&all_lwps,
2003 stuck_in_jump_pad_callback, NULL);
2004 if (lwp_stuck != NULL)
2005 {
2006 if (debug_threads)
2007 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2008 lwpid_of (lwp_stuck));
2009 return;
2010 }
2011
2012 save_inferior = current_inferior;
2013
2014 stabilizing_threads = 1;
2015
2016 /* Kick 'em all. */
2017 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2018
2019 /* Loop until all are stopped out of the jump pads. */
2020 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2021 {
2022 struct target_waitstatus ourstatus;
2023 struct lwp_info *lwp;
2024 int wstat;
2025
2026 /* Note that we go through the full wait event loop.  While
2027 moving threads out of the jump pad, we need to be able to
2028 step over internal breakpoints and such.  */
2029 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2030
2031 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2032 {
2033 lwp = get_thread_lwp (current_inferior);
2034
2035 /* Lock it. */
2036 lwp->suspended++;
2037
2038 if (ourstatus.value.sig != TARGET_SIGNAL_0
2039 || current_inferior->last_resume_kind == resume_stop)
2040 {
2041 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2042 enqueue_one_deferred_signal (lwp, &wstat);
2043 }
2044 }
2045 }
2046
2047 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2048
2049 stabilizing_threads = 0;
2050
2051 current_inferior = save_inferior;
2052
2053 if (debug_threads)
2054 {
2055 lwp_stuck
2056 = (struct lwp_info *) find_inferior (&all_lwps,
2057 stuck_in_jump_pad_callback, NULL);
2058 if (lwp_stuck != NULL)
2059 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2060 lwpid_of (lwp_stuck));
2061 }
2062 }
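
/* A minimal sketch of where stabilization fits in a stop-reporting
   path (illustrative only -- in this file the call actually arrives
   through the stabilize_threads hook used by linux_wait_1 below):

     stop_all_lwps (0, NULL);       // all-stop: freeze the world
     linux_stabilize_threads ();    // drain LWPs out of jump pads,
                                    // deferring their signals
     ... now safe to report the event, start a trace run, or
         rewrite jump pads ...
*/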
2063
2064 /* Wait for a process event, and return its status.  */
2065
2066 static ptid_t
2067 linux_wait_1 (ptid_t ptid,
2068 struct target_waitstatus *ourstatus, int target_options)
2069 {
2070 int w;
2071 struct lwp_info *event_child;
2072 int options;
2073 int pid;
2074 int step_over_finished;
2075 int bp_explains_trap;
2076 int maybe_internal_trap;
2077 int report_to_gdb;
2078 int trace_event;
2079
2080 /* Translate generic target options into linux options. */
2081 options = __WALL;
2082 if (target_options & TARGET_WNOHANG)
2083 options |= WNOHANG;
2084
2085 retry:
2086 bp_explains_trap = 0;
2087 trace_event = 0;
2088 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2089
2090 /* If we were only supposed to resume one thread, only wait for
2091 that thread - if it's still alive. If it died, however - which
2092 can happen if we're coming from the thread death case below -
2093 then we need to make sure we restart the other threads. We could
2094 pick a thread at random or restart all; restarting all is less
2095 arbitrary. */
2096 if (!non_stop
2097 && !ptid_equal (cont_thread, null_ptid)
2098 && !ptid_equal (cont_thread, minus_one_ptid))
2099 {
2100 struct thread_info *thread;
2101
2102 thread = (struct thread_info *) find_inferior_id (&all_threads,
2103 cont_thread);
2104
2105 /* No stepping, no signal - unless one is pending already, of course. */
2106 if (thread == NULL)
2107 {
2108 struct thread_resume resume_info;
2109 resume_info.thread = minus_one_ptid;
2110 resume_info.kind = resume_continue;
2111 resume_info.sig = 0;
2112 linux_resume (&resume_info, 1);
2113 }
2114 else
2115 ptid = cont_thread;
2116 }
2117
2118 if (ptid_equal (step_over_bkpt, null_ptid))
2119 pid = linux_wait_for_event (ptid, &w, options);
2120 else
2121 {
2122 if (debug_threads)
2123 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2124 target_pid_to_str (step_over_bkpt));
2125 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2126 }
2127
2128 if (pid == 0) /* only if TARGET_WNOHANG */
2129 return null_ptid;
2130
2131 event_child = get_thread_lwp (current_inferior);
2132
2133 /* If we are waiting for a particular child, and it exited,
2134 linux_wait_for_event will return its exit status. Similarly if
2135 the last child exited. If this is not the last child, however,
2136 do not report it as exited until there is a 'thread exited' response
2137 available in the remote protocol. Instead, just wait for another event.
2138 This should be safe, because if the thread crashed we will already
2139 have reported the termination signal to GDB; that should stop any
2140 in-progress stepping operations, etc.
2141
2142 Report the exit status of the last thread to exit. This matches
2143 LinuxThreads' behavior. */
2144
2145 if (last_thread_of_process_p (current_inferior))
2146 {
2147 if (WIFEXITED (w) || WIFSIGNALED (w))
2148 {
2149 if (WIFEXITED (w))
2150 {
2151 ourstatus->kind = TARGET_WAITKIND_EXITED;
2152 ourstatus->value.integer = WEXITSTATUS (w);
2153
2154 if (debug_threads)
2155 fprintf (stderr,
2156 "\nChild exited with retcode = %x \n",
2157 WEXITSTATUS (w));
2158 }
2159 else
2160 {
2161 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2162 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2163
2164 if (debug_threads)
2165 fprintf (stderr,
2166 "\nChild terminated with signal = %x \n",
2167 WTERMSIG (w));
2168
2169 }
2170
2171 return ptid_of (event_child);
2172 }
2173 }
2174 else
2175 {
2176 if (!WIFSTOPPED (w))
2177 goto retry;
2178 }
2179
2180 /* If this event was not handled before, and is not a SIGTRAP, we
2181 report it. SIGILL and SIGSEGV are also treated as traps in case
2182 a breakpoint is inserted at the current PC. If this target does
2183 not support internal breakpoints at all, we also report the
2184 SIGTRAP without further processing; it's of no concern to us. */
2185 maybe_internal_trap
2186 = (supports_breakpoints ()
2187 && (WSTOPSIG (w) == SIGTRAP
2188 || ((WSTOPSIG (w) == SIGILL
2189 || WSTOPSIG (w) == SIGSEGV)
2190 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2191
2192 if (maybe_internal_trap)
2193 {
2194 /* Handle anything that requires bookkeeping before deciding to
2195 report the event or continue waiting. */
2196
2197 /* First check if we can explain the SIGTRAP with an internal
2198 breakpoint, or if we should possibly report the event to GDB.
2199 Do this before anything that may remove or insert a
2200 breakpoint. */
2201 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2202
2203 /* We have a SIGTRAP, possibly a step-over dance has just
2204 finished. If so, tweak the state machine accordingly,
2205 reinsert breakpoints and delete any reinsert (software
2206 single-step) breakpoints. */
2207 step_over_finished = finish_step_over (event_child);
2208
2209 /* Now invoke the callbacks of any internal breakpoints there. */
2210 check_breakpoints (event_child->stop_pc);
2211
2212 /* Handle tracepoint data collecting. This may overflow the
2213 trace buffer, and cause a tracing stop, removing
2214 breakpoints. */
2215 trace_event = handle_tracepoints (event_child);
2216
2217 if (bp_explains_trap)
2218 {
2219 /* If we stepped or ran into an internal breakpoint, we've
2220 already handled it. So next time we resume (from this
2221 PC), we should step over it. */
2222 if (debug_threads)
2223 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2224
2225 if (breakpoint_here (event_child->stop_pc))
2226 event_child->need_step_over = 1;
2227 }
2228 }
2229 else
2230 {
2231 /* We have some other signal, possibly a step-over dance was in
2232 progress, and it should be cancelled too. */
2233 step_over_finished = finish_step_over (event_child);
2234 }
2235
2236 /* We have all the data we need. Either report the event to GDB, or
2237 resume threads and keep waiting for more. */
2238
2239 /* If we're collecting a fast tracepoint, finish the collection and
2240 move out of the jump pad before delivering a signal. See
2241 linux_stabilize_threads. */
2242
2243 if (WIFSTOPPED (w)
2244 && WSTOPSIG (w) != SIGTRAP
2245 && supports_fast_tracepoints ()
2246 && in_process_agent_loaded ())
2247 {
2248 if (debug_threads)
2249 fprintf (stderr,
2250 "Got signal %d for LWP %ld. Check if we need "
2251 "to defer or adjust it.\n",
2252 WSTOPSIG (w), lwpid_of (event_child));
2253
2254 /* Allow debugging the jump pad itself. */
2255 if (current_inferior->last_resume_kind != resume_step
2256 && maybe_move_out_of_jump_pad (event_child, &w))
2257 {
2258 enqueue_one_deferred_signal (event_child, &w);
2259
2260 if (debug_threads)
2261 fprintf (stderr,
2262 "Signal %d for LWP %ld deferred (in jump pad)\n",
2263 WSTOPSIG (w), lwpid_of (event_child));
2264
2265 linux_resume_one_lwp (event_child, 0, 0, NULL);
2266 goto retry;
2267 }
2268 }
2269
2270 if (event_child->collecting_fast_tracepoint)
2271 {
2272 if (debug_threads)
2273 fprintf (stderr, "\
2274 LWP %ld was trying to move out of the jump pad (%d). \
2275 Check if we're already there.\n",
2276 lwpid_of (event_child),
2277 event_child->collecting_fast_tracepoint);
2278
2279 trace_event = 1;
2280
2281 event_child->collecting_fast_tracepoint
2282 = linux_fast_tracepoint_collecting (event_child, NULL);
2283
2284 if (event_child->collecting_fast_tracepoint != 1)
2285 {
2286 /* No longer need this breakpoint. */
2287 if (event_child->exit_jump_pad_bkpt != NULL)
2288 {
2289 if (debug_threads)
2290 fprintf (stderr,
2291 "No longer need exit-jump-pad bkpt; removing it."
2292 "stopping all threads momentarily.\n");
2293
2294 /* Other running threads could hit this breakpoint.
2295 We don't handle moribund locations like GDB does;
2296 instead, we always pause all threads when removing
2297 breakpoints, so that any step-over or
2298 decr_pc_after_break adjustment is always taken
2299 care of while the breakpoint is still
2300 inserted. */
2301 stop_all_lwps (1, event_child);
2302 cancel_breakpoints ();
2303
2304 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2305 event_child->exit_jump_pad_bkpt = NULL;
2306
2307 unstop_all_lwps (1, event_child);
2308
2309 gdb_assert (event_child->suspended >= 0);
2310 }
2311 }
2312
2313 if (event_child->collecting_fast_tracepoint == 0)
2314 {
2315 if (debug_threads)
2316 fprintf (stderr,
2317 "fast tracepoint finished "
2318 "collecting successfully.\n");
2319
2320 /* We may have a deferred signal to report. */
2321 if (dequeue_one_deferred_signal (event_child, &w))
2322 {
2323 if (debug_threads)
2324 fprintf (stderr, "dequeued one signal.\n");
2325 }
2326 else
2327 {
2328 if (debug_threads)
2329 fprintf (stderr, "no deferred signals.\n");
2330
2331 if (stabilizing_threads)
2332 {
2333 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2334 ourstatus->value.sig = TARGET_SIGNAL_0;
2335 return ptid_of (event_child);
2336 }
2337 }
2338 }
2339 }
2340
2341 /* Check whether GDB would be interested in this event. */
2342
2343 /* If GDB is not interested in this signal, don't stop other
2344 threads, and don't report it to GDB. Just resume the inferior
2345 right away. We do this for threading-related signals as well as
2346 any that GDB specifically requested we ignore. But never ignore
2347 SIGSTOP if we sent it ourselves, and do not ignore signals when
2348 stepping - they may require special handling to skip the signal
2349 handler. */
2350 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2351 thread library? */
2352 if (WIFSTOPPED (w)
2353 && current_inferior->last_resume_kind != resume_step
2354 && (
2355 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2356 (current_process ()->private->thread_db != NULL
2357 && (WSTOPSIG (w) == __SIGRTMIN
2358 || WSTOPSIG (w) == __SIGRTMIN + 1))
2359 ||
2360 #endif
2361 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2362 && !(WSTOPSIG (w) == SIGSTOP
2363 && current_inferior->last_resume_kind == resume_stop))))
2364 {
2365 siginfo_t info, *info_p;
2366
2367 if (debug_threads)
2368 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2369 WSTOPSIG (w), lwpid_of (event_child));
2370
2371 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2372 info_p = &info;
2373 else
2374 info_p = NULL;
2375 linux_resume_one_lwp (event_child, event_child->stepping,
2376 WSTOPSIG (w), info_p);
2377 goto retry;
2378 }
2379
2380 /* If GDB wanted this thread to single-step, we always want to
2381 report the SIGTRAP, and let GDB handle it.  Watchpoints should
2382 always be reported.  So should signals we can't explain.  A
2383 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2384 may not support Z0 breakpoints.  If we do, we'll be able to handle
2385 GDB breakpoints on top of internal breakpoints, by handling the
2386 internal breakpoint and still reporting the event to GDB.  If we
2387 don't, we're out of luck; GDB won't see the breakpoint hit.  */
2388 report_to_gdb = (!maybe_internal_trap
2389 || current_inferior->last_resume_kind == resume_step
2390 || event_child->stopped_by_watchpoint
2391 || (!step_over_finished
2392 && !bp_explains_trap && !trace_event)
2393 || gdb_breakpoint_here (event_child->stop_pc));
2394
2395 /* We found no reason GDB would want us to stop. We either hit one
2396 of our own breakpoints, or finished an internal step GDB
2397 shouldn't know about. */
2398 if (!report_to_gdb)
2399 {
2400 if (debug_threads)
2401 {
2402 if (bp_explains_trap)
2403 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2404 if (step_over_finished)
2405 fprintf (stderr, "Step-over finished.\n");
2406 if (trace_event)
2407 fprintf (stderr, "Tracepoint event.\n");
2408 }
2409
2410 /* We're not reporting this breakpoint to GDB, so apply the
2411 decr_pc_after_break adjustment to the inferior's regcache
2412 ourselves. */
2413
2414 if (the_low_target.set_pc != NULL)
2415 {
2416 struct regcache *regcache
2417 = get_thread_regcache (get_lwp_thread (event_child), 1);
2418 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2419 }
2420
2421 /* We may have finished stepping over a breakpoint. If so,
2422 we've stopped and suspended all LWPs momentarily except the
2423 stepping one. This is where we resume them all again. We're
2424 going to keep waiting, so use proceed, which handles stepping
2425 over the next breakpoint. */
2426 if (debug_threads)
2427 fprintf (stderr, "proceeding all threads.\n");
2428
2429 if (step_over_finished)
2430 unsuspend_all_lwps (event_child);
2431
2432 proceed_all_lwps ();
2433 goto retry;
2434 }
2435
2436 if (debug_threads)
2437 {
2438 if (current_inferior->last_resume_kind == resume_step)
2439 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2440 if (event_child->stopped_by_watchpoint)
2441 fprintf (stderr, "Stopped by watchpoint.\n");
2442 if (gdb_breakpoint_here (event_child->stop_pc))
2443 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2444 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2446 }
2447
2448 /* Alright, we're going to report a stop. */
2449
2450 if (!non_stop && !stabilizing_threads)
2451 {
2452 /* In all-stop, stop all threads. */
2453 stop_all_lwps (0, NULL);
2454
2455 /* If we're not waiting for a specific LWP, choose an event LWP
2456 from among those that have had events. Giving equal priority
2457 to all LWPs that have had events helps prevent
2458 starvation. */
2459 if (ptid_equal (ptid, minus_one_ptid))
2460 {
2461 event_child->status_pending_p = 1;
2462 event_child->status_pending = w;
2463
2464 select_event_lwp (&event_child);
2465
2466 event_child->status_pending_p = 0;
2467 w = event_child->status_pending;
2468 }
2469
2470 /* Now that we've selected our final event LWP, cancel any
2471 breakpoints in other LWPs that have hit a GDB breakpoint.
2472 See the comment in cancel_breakpoints_callback to find out
2473 why. */
2474 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2475
2476 /* Stabilize threads (move out of jump pads). */
2477 stabilize_threads ();
2478 }
2479 else
2480 {
2481 /* If we just finished a step-over, then all threads had been
2482 momentarily paused. In all-stop, that's fine, we want
2483 threads stopped by now anyway. In non-stop, we need to
2484 re-resume threads that GDB wanted to be running. */
2485 if (step_over_finished)
2486 unstop_all_lwps (1, event_child);
2487 }
2488
2489 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2490
2491 if (current_inferior->last_resume_kind == resume_stop
2492 && WSTOPSIG (w) == SIGSTOP)
2493 {
2494 /* The thread was requested to stop by GDB with vCont;t, and it
2495 stopped cleanly, so report it as SIG0.  The use of SIGSTOP
2496 is an implementation detail.  */
2497 ourstatus->value.sig = TARGET_SIGNAL_0;
2498 }
2499 else if (current_inferior->last_resume_kind == resume_stop
2500 && WSTOPSIG (w) != SIGSTOP)
2501 {
2502 /* The thread was requested to stop by GDB with vCont;t, but it
2503 stopped for other reasons.  */
2504 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2505 }
2506 else
2507 {
2508 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2509 }
2510
2511 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2512
2513 if (debug_threads)
2514 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2515 target_pid_to_str (ptid_of (event_child)),
2516 ourstatus->kind,
2517 ourstatus->value.sig);
2518
2519 return ptid_of (event_child);
2520 }
2521
2522 /* Get rid of any pending event in the pipe. */
2523 static void
2524 async_file_flush (void)
2525 {
2526 int ret;
2527 char buf;
2528
2529 do
2530 ret = read (linux_event_pipe[0], &buf, 1);
2531 while (ret >= 0 || (ret == -1 && errno == EINTR));
2532 }
2533
2534 /* Put something in the pipe, so the event loop wakes up. */
2535 static void
2536 async_file_mark (void)
2537 {
2538 int ret;
2539
2540 async_file_flush ();
2541
2542 do
2543 ret = write (linux_event_pipe[1], "+", 1);
2544 while (ret == 0 || (ret == -1 && errno == EINTR));
2545
2546 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2547 be awakened anyway. */
2548 }
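
/* Together with async_file_flush, this implements the self-pipe
   pattern: the event loop blocks on the read end of
   linux_event_pipe, and the single byte written here wakes it.  A
   minimal sketch of such a waiter (illustrative only -- the real
   event loop lives elsewhere in gdbserver):

     fd_set rset;
     FD_ZERO (&rset);
     FD_SET (linux_event_pipe[0], &rset);
     select (linux_event_pipe[0] + 1, &rset, NULL, NULL, NULL);
     ... woken up: call linux_wait with TARGET_WNOHANG set ...
*/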
2549
2550 static ptid_t
2551 linux_wait (ptid_t ptid,
2552 struct target_waitstatus *ourstatus, int target_options)
2553 {
2554 ptid_t event_ptid;
2555
2556 if (debug_threads)
2557 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2558
2559 /* Flush the async file first. */
2560 if (target_is_async_p ())
2561 async_file_flush ();
2562
2563 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2564
2565 /* If at least one stop was reported, there may be more. A single
2566 SIGCHLD can signal more than one child stop. */
2567 if (target_is_async_p ()
2568 && (target_options & TARGET_WNOHANG) != 0
2569 && !ptid_equal (event_ptid, null_ptid))
2570 async_file_mark ();
2571
2572 return event_ptid;
2573 }
2574
2575 /* Send a signal to an LWP. */
2576
2577 static int
2578 kill_lwp (unsigned long lwpid, int signo)
2579 {
2580 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2581 fails, then we are not using nptl threads and we should be using kill. */
2582
2583 #ifdef __NR_tkill
2584 {
2585 static int tkill_failed;
2586
2587 if (!tkill_failed)
2588 {
2589 int ret;
2590
2591 errno = 0;
2592 ret = syscall (__NR_tkill, lwpid, signo);
2593 if (errno != ENOSYS)
2594 return ret;
2595 tkill_failed = 1;
2596 }
2597 }
2598 #endif
2599
2600 return kill (lwpid, signo);
2601 }
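
/* For example, send_sigstop below calls kill_lwp (pid, SIGSTOP) with
   an LWP id: tkill delivers the signal to that specific thread,
   whereas plain kill () addresses the whole thread group, and the
   kernel may deliver the signal to any thread in it.  */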
2602
2603 void
2604 linux_stop_lwp (struct lwp_info *lwp)
2605 {
2606 send_sigstop (lwp);
2607 }
2608
2609 static void
2610 send_sigstop (struct lwp_info *lwp)
2611 {
2612 int pid;
2613
2614 pid = lwpid_of (lwp);
2615
2616 /* If we already have a pending stop signal for this LWP, don't
2617 send another. */
2618 if (lwp->stop_expected)
2619 {
2620 if (debug_threads)
2621 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2622
2623 return;
2624 }
2625
2626 if (debug_threads)
2627 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2628
2629 lwp->stop_expected = 1;
2630 kill_lwp (pid, SIGSTOP);
2631 }
2632
2633 static int
2634 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2635 {
2636 struct lwp_info *lwp = (struct lwp_info *) entry;
2637
2638 /* Ignore EXCEPT. */
2639 if (lwp == except)
2640 return 0;
2641
2642 if (lwp->stopped)
2643 return 0;
2644
2645 send_sigstop (lwp);
2646 return 0;
2647 }
2648
2649 /* Increment the suspend count of an LWP, and stop it, if not stopped
2650 yet. */
2651 static int
2652 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2653 void *except)
2654 {
2655 struct lwp_info *lwp = (struct lwp_info *) entry;
2656
2657 /* Ignore EXCEPT. */
2658 if (lwp == except)
2659 return 0;
2660
2661 lwp->suspended++;
2662
2663 return send_sigstop_callback (entry, except);
2664 }
2665
2666 static void
2667 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2668 {
2669 /* It's dead, really. */
2670 lwp->dead = 1;
2671
2672 /* Store the exit status for later. */
2673 lwp->status_pending_p = 1;
2674 lwp->status_pending = wstat;
2675
2676 /* Prevent trying to stop it. */
2677 lwp->stopped = 1;
2678
2679 /* No further stops are expected from a dead lwp. */
2680 lwp->stop_expected = 0;
2681 }
2682
2683 static void
2684 wait_for_sigstop (struct inferior_list_entry *entry)
2685 {
2686 struct lwp_info *lwp = (struct lwp_info *) entry;
2687 struct thread_info *saved_inferior;
2688 int wstat;
2689 ptid_t saved_tid;
2690 ptid_t ptid;
2691 int pid;
2692
2693 if (lwp->stopped)
2694 {
2695 if (debug_threads)
2696 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2697 lwpid_of (lwp));
2698 return;
2699 }
2700
2701 saved_inferior = current_inferior;
2702 if (saved_inferior != NULL)
2703 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2704 else
2705 saved_tid = null_ptid; /* avoid bogus unused warning */
2706
2707 ptid = lwp->head.id;
2708
2709 if (debug_threads)
2710 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2711
2712 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2713
2714 /* If we stopped with a non-SIGSTOP signal, save it for later
2715 and record the pending SIGSTOP. If the process exited, just
2716 return. */
2717 if (WIFSTOPPED (wstat))
2718 {
2719 if (debug_threads)
2720 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2721 lwpid_of (lwp), WSTOPSIG (wstat));
2722
2723 if (WSTOPSIG (wstat) != SIGSTOP)
2724 {
2725 if (debug_threads)
2726 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2727 lwpid_of (lwp), wstat);
2728
2729 lwp->status_pending_p = 1;
2730 lwp->status_pending = wstat;
2731 }
2732 }
2733 else
2734 {
2735 if (debug_threads)
2736 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2737
2738 lwp = find_lwp_pid (pid_to_ptid (pid));
2739 if (lwp)
2740 {
2741 /* Leave this status pending for the next time we're able to
2742 report it.  In the meantime, we'll report this lwp as
2743 dead to GDB, so GDB doesn't try to read registers and
2744 memory from it. This can only happen if this was the
2745 last thread of the process; otherwise, PID is removed
2746 from the thread tables before linux_wait_for_event
2747 returns. */
2748 mark_lwp_dead (lwp, wstat);
2749 }
2750 }
2751
2752 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2753 current_inferior = saved_inferior;
2754 else
2755 {
2756 if (debug_threads)
2757 fprintf (stderr, "Previously current thread died.\n");
2758
2759 if (non_stop)
2760 {
2761 /* We can't change the current inferior behind GDB's back,
2762 otherwise, a subsequent command may apply to the wrong
2763 process. */
2764 current_inferior = NULL;
2765 }
2766 else
2767 {
2768 /* Set a valid thread as current. */
2769 set_desired_inferior (0);
2770 }
2771 }
2772 }
2773
2774 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2775 move it out, because we need to report the stop event to GDB. For
2776 example, if the user puts a breakpoint in the jump pad, it's
2777 because she wants to debug it. */
2778
2779 static int
2780 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2781 {
2782 struct lwp_info *lwp = (struct lwp_info *) entry;
2783 struct thread_info *thread = get_lwp_thread (lwp);
2784
2785 gdb_assert (lwp->suspended == 0);
2786 gdb_assert (lwp->stopped);
2787
2788 /* Allow debugging the jump pad, gdb_collect, etc.  */
2789 return (supports_fast_tracepoints ()
2790 && in_process_agent_loaded ()
2791 && (gdb_breakpoint_here (lwp->stop_pc)
2792 || lwp->stopped_by_watchpoint
2793 || thread->last_resume_kind == resume_step)
2794 && linux_fast_tracepoint_collecting (lwp, NULL));
2795 }
2796
2797 static void
2798 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2799 {
2800 struct lwp_info *lwp = (struct lwp_info *) entry;
2801 struct thread_info *thread = get_lwp_thread (lwp);
2802 int *wstat;
2803
2804 gdb_assert (lwp->suspended == 0);
2805 gdb_assert (lwp->stopped);
2806
2807 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2808
2809 /* Allow debugging the jump pad, gdb_collect, etc. */
2810 if (!gdb_breakpoint_here (lwp->stop_pc)
2811 && !lwp->stopped_by_watchpoint
2812 && thread->last_resume_kind != resume_step
2813 && maybe_move_out_of_jump_pad (lwp, wstat))
2814 {
2815 if (debug_threads)
2816 fprintf (stderr,
2817 "LWP %ld needs stabilizing (in jump pad)\n",
2818 lwpid_of (lwp));
2819
2820 if (wstat)
2821 {
2822 lwp->status_pending_p = 0;
2823 enqueue_one_deferred_signal (lwp, wstat);
2824
2825 if (debug_threads)
2826 fprintf (stderr,
2827 "Signal %d for LWP %ld deferred "
2828 "(in jump pad)\n",
2829 WSTOPSIG (*wstat), lwpid_of (lwp));
2830 }
2831
2832 linux_resume_one_lwp (lwp, 0, 0, NULL);
2833 }
2834 else
2835 lwp->suspended++;
2836 }
2837
2838 static int
2839 lwp_running (struct inferior_list_entry *entry, void *data)
2840 {
2841 struct lwp_info *lwp = (struct lwp_info *) entry;
2842
2843 if (lwp->dead)
2844 return 0;
2845 if (lwp->stopped)
2846 return 0;
2847 return 1;
2848 }
2849
2850 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2851 If SUSPEND, then also increase the suspend count of every LWP,
2852 except EXCEPT. */
2853
2854 static void
2855 stop_all_lwps (int suspend, struct lwp_info *except)
2856 {
2857 stopping_threads = 1;
2858
2859 if (suspend)
2860 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2861 else
2862 find_inferior (&all_lwps, send_sigstop_callback, except);
2863 for_each_inferior (&all_lwps, wait_for_sigstop);
2864 stopping_threads = 0;
2865 }
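
/* Each stop_all_lwps call is paired with a later unstop.  A sketch
   of the suspending variant, as the step-over code below uses it
   (illustrative only):

     stop_all_lwps (1, lwp);     // SIGSTOP + suspend all but LWP
     ... single-step LWP with its breakpoint uninserted ...
     unstop_all_lwps (1, lwp);   // unsuspend and resume the others

   The suspend count is what keeps an LWP paused even if other code
   stops and restarts threads in the meantime; proceed_one_lwp below
   refuses to resume a suspended LWP.  */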
2866
2867 /* Resume execution of LWP.
2868 If STEP is nonzero, single-step it.
2869 If SIGNAL is nonzero, give it that signal. */
2870
2871 static void
2872 linux_resume_one_lwp (struct lwp_info *lwp,
2873 int step, int signal, siginfo_t *info)
2874 {
2875 struct thread_info *saved_inferior;
2876 int fast_tp_collecting;
2877
2878 if (lwp->stopped == 0)
2879 return;
2880
2881 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2882
2883 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2884
2885 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2886 user used the "jump" command, or "set $pc = foo"). */
2887 if (lwp->stop_pc != get_pc (lwp))
2888 {
2889 /* Collecting 'while-stepping' actions doesn't make sense
2890 anymore. */
2891 release_while_stepping_state_list (get_lwp_thread (lwp));
2892 }
2893
2894 /* If we have pending signals or status, and a new signal, enqueue the
2895 signal. Also enqueue the signal if we are waiting to reinsert a
2896 breakpoint; it will be picked up again below. */
2897 if (signal != 0
2898 && (lwp->status_pending_p
2899 || lwp->pending_signals != NULL
2900 || lwp->bp_reinsert != 0
2901 || fast_tp_collecting))
2902 {
2903 struct pending_signals *p_sig;
2904 p_sig = xmalloc (sizeof (*p_sig));
2905 p_sig->prev = lwp->pending_signals;
2906 p_sig->signal = signal;
2907 if (info == NULL)
2908 memset (&p_sig->info, 0, sizeof (siginfo_t));
2909 else
2910 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2911 lwp->pending_signals = p_sig;
2912 }
2913
2914 if (lwp->status_pending_p)
2915 {
2916 if (debug_threads)
2917 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2918 " has pending status\n",
2919 lwpid_of (lwp), step ? "step" : "continue", signal,
2920 lwp->stop_expected ? "expected" : "not expected");
2921 return;
2922 }
2923
2924 saved_inferior = current_inferior;
2925 current_inferior = get_lwp_thread (lwp);
2926
2927 if (debug_threads)
2928 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2929 lwpid_of (lwp), step ? "step" : "continue", signal,
2930 lwp->stop_expected ? "expected" : "not expected");
2931
2932 /* This bit needs some thinking about. If we get a signal that
2933 we must report while a single-step reinsert is still pending,
2934 we often end up resuming the thread. It might be better to
2935 (ew) allow a stack of pending events; then we could be sure that
2936 the reinsert happened right away and not lose any signals.
2937
2938 Making this stack would also shrink the window in which breakpoints are
2939 uninserted (see comment in linux_wait_for_lwp) but not enough for
2940 complete correctness, so it won't solve that problem. It may be
2941 worthwhile just to solve this one, however. */
2942 if (lwp->bp_reinsert != 0)
2943 {
2944 if (debug_threads)
2945 fprintf (stderr, " pending reinsert at 0x%s\n",
2946 paddress (lwp->bp_reinsert));
2947
2948 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2949 {
2950 if (fast_tp_collecting == 0)
2951 {
2952 if (step == 0)
2953 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2954 if (lwp->suspended)
2955 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2956 lwp->suspended);
2957 }
2958
2959 step = 1;
2960 }
2961
2962 /* Postpone any pending signal. It was enqueued above. */
2963 signal = 0;
2964 }
2965
2966 if (fast_tp_collecting == 1)
2967 {
2968 if (debug_threads)
2969 fprintf (stderr, "\
2970 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2971 lwpid_of (lwp));
2972
2973 /* Postpone any pending signal. It was enqueued above. */
2974 signal = 0;
2975 }
2976 else if (fast_tp_collecting == 2)
2977 {
2978 if (debug_threads)
2979 fprintf (stderr, "\
2980 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2981 lwpid_of (lwp));
2982
2983 if (can_hardware_single_step ())
2984 step = 1;
2985 else
2986 fatal ("moving out of jump pad single-stepping"
2987 " not implemented on this target");
2988
2989 /* Postpone any pending signal. It was enqueued above. */
2990 signal = 0;
2991 }
2992
2993 /* If we have while-stepping actions in this thread set it stepping.
2994 If we have a signal to deliver, it may or may not be set to
2995 SIG_IGN, we don't know. Assume so, and allow collecting
2996 while-stepping into a signal handler. A possible smart thing to
2997 do would be to set an internal breakpoint at the signal return
2998 address, continue, and carry on catching this while-stepping
2999 action only when that breakpoint is hit. A future
3000 enhancement. */
3001 if (get_lwp_thread (lwp)->while_stepping != NULL
3002 && can_hardware_single_step ())
3003 {
3004 if (debug_threads)
3005 fprintf (stderr,
3006 "lwp %ld has a while-stepping action -> forcing step.\n",
3007 lwpid_of (lwp));
3008 step = 1;
3009 }
3010
3011 if (debug_threads && the_low_target.get_pc != NULL)
3012 {
3013 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3014 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3015 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3016 }
3017
3018 /* If we have pending signals, consume one unless we are trying to
3019 reinsert a breakpoint or we're trying to finish a fast tracepoint
3020 collect. */
3021 if (lwp->pending_signals != NULL
3022 && lwp->bp_reinsert == 0
3023 && fast_tp_collecting == 0)
3024 {
3025 struct pending_signals **p_sig;
3026
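/* PENDING_SIGNALS is pushed newest-first (see the enqueue earlier in
this function), so walk to the tail of the list and deliver the
oldest pending signal first, preserving FIFO order. */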
3027 p_sig = &lwp->pending_signals;
3028 while ((*p_sig)->prev != NULL)
3029 p_sig = &(*p_sig)->prev;
3030
3031 signal = (*p_sig)->signal;
3032 if ((*p_sig)->info.si_signo != 0)
3033 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3034
3035 free (*p_sig);
3036 *p_sig = NULL;
3037 }
3038
3039 if (the_low_target.prepare_to_resume != NULL)
3040 the_low_target.prepare_to_resume (lwp);
3041
3042 regcache_invalidate_one ((struct inferior_list_entry *)
3043 get_lwp_thread (lwp));
3044 errno = 0;
3045 lwp->stopped = 0;
3046 lwp->stopped_by_watchpoint = 0;
3047 lwp->stepping = step;
3048 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3049 /* Coerce to a uintptr_t first to avoid potential gcc warning
3050 about coercing an 8 byte integer to a 4 byte pointer.  */
3051 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3052
3053 current_inferior = saved_inferior;
3054 if (errno)
3055 {
3056 /* ESRCH from ptrace either means that the thread was already
3057 running (an error) or that it is gone (a race condition). If
3058 it's gone, we will get a notification the next time we wait,
3059 so we can ignore the error. We could differentiate these
3060 two, but it's tricky without waiting; the thread still exists
3061 as a zombie, so sending it signal 0 would succeed. So just
3062 ignore ESRCH. */
3063 if (errno == ESRCH)
3064 return;
3065
3066 perror_with_name ("ptrace");
3067 }
3068 }
3069
3070 struct thread_resume_array
3071 {
3072 struct thread_resume *resume;
3073 size_t n;
3074 };
3075
3076 /* This function is called once per thread. We look up the thread
3077 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3078 resume request.
3079
3080 This algorithm is O(threads * resume elements), but the number of
3081 resume elements is small (and will remain small at least until GDB
3082 supports thread suspension).  */
3083 static int
3084 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3085 {
3086 struct lwp_info *lwp;
3087 struct thread_info *thread;
3088 int ndx;
3089 struct thread_resume_array *r;
3090
3091 thread = (struct thread_info *) entry;
3092 lwp = get_thread_lwp (thread);
3093 r = arg;
3094
3095 for (ndx = 0; ndx < r->n; ndx++)
3096 {
3097 ptid_t ptid = r->resume[ndx].thread;
3098 if (ptid_equal (ptid, minus_one_ptid)
3099 || ptid_equal (ptid, entry->id)
3100 || (ptid_is_pid (ptid)
3101 && (ptid_get_pid (ptid) == pid_of (lwp)))
3102 || (ptid_get_lwp (ptid) == -1
3103 && (ptid_get_pid (ptid) == pid_of (lwp))))
3104 {
3105 if (r->resume[ndx].kind == resume_stop
3106 && thread->last_resume_kind == resume_stop)
3107 {
3108 if (debug_threads)
3109 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3110 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3111 ? "stopped"
3112 : "stopping",
3113 lwpid_of (lwp));
3114
3115 continue;
3116 }
3117
3118 lwp->resume = &r->resume[ndx];
3119 thread->last_resume_kind = lwp->resume->kind;
3120
3121 /* If we had a deferred signal to report, dequeue one now.
3122 This can happen if the LWP gets more than one signal while
3123 trying to get out of a jump pad. */
3124 if (lwp->stopped
3125 && !lwp->status_pending_p
3126 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3127 {
3128 lwp->status_pending_p = 1;
3129
3130 if (debug_threads)
3131 fprintf (stderr,
3132 "Dequeueing deferred signal %d for LWP %ld, "
3133 "leaving status pending.\n",
3134 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3135 }
3136
3137 return 0;
3138 }
3139 }
3140
3141 /* No resume action for this thread. */
3142 lwp->resume = NULL;
3143
3144 return 0;
3145 }
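
/* An illustrative example of the matching above (numbers and the
   vCont translation are hypothetical; that translation happens
   outside this file): a "vCont;s:p3e8.4d2;c" packet arrives as two
   thread_resume entries, roughly

     { ptid (pid 1000, lwp 1234), resume_step,     0 }
     { minus_one_ptid,            resume_continue, 0 }

   For LWP 1234 the first entry wins; every other thread falls
   through to the wildcard minus_one_ptid entry.  Entries are checked
   in order, so more specific requests should come first.  */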
3146
3147
3148 /* Set *FLAG_P if this lwp has an interesting status pending. */
3149 static int
3150 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3151 {
3152 struct lwp_info *lwp = (struct lwp_info *) entry;
3153
3154 /* LWPs which will not be resumed are not interesting, because
3155 we might not wait for them next time through linux_wait. */
3156 if (lwp->resume == NULL)
3157 return 0;
3158
3159 if (lwp->status_pending_p)
3160 * (int *) flag_p = 1;
3161
3162 return 0;
3163 }
3164
3165 /* Return 1 if this lwp that GDB wants running is stopped at an
3166 internal breakpoint that we need to step over. It assumes that any
3167 required STOP_PC adjustment has already been propagated to the
3168 inferior's regcache. */
3169
3170 static int
3171 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3172 {
3173 struct lwp_info *lwp = (struct lwp_info *) entry;
3174 struct thread_info *thread;
3175 struct thread_info *saved_inferior;
3176 CORE_ADDR pc;
3177
3178 /* LWPs which will not be resumed are not interesting, because we
3179 might not wait for them next time through linux_wait. */
3180
3181 if (!lwp->stopped)
3182 {
3183 if (debug_threads)
3184 fprintf (stderr,
3185 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3186 lwpid_of (lwp));
3187 return 0;
3188 }
3189
3190 thread = get_lwp_thread (lwp);
3191
3192 if (thread->last_resume_kind == resume_stop)
3193 {
3194 if (debug_threads)
3195 fprintf (stderr,
3196 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3197 lwpid_of (lwp));
3198 return 0;
3199 }
3200
3201 gdb_assert (lwp->suspended >= 0);
3202
3203 if (lwp->suspended)
3204 {
3205 if (debug_threads)
3206 fprintf (stderr,
3207 "Need step over [LWP %ld]? Ignoring, suspended\n",
3208 lwpid_of (lwp));
3209 return 0;
3210 }
3211
3212 if (!lwp->need_step_over)
3213 {
3214 if (debug_threads)
3215 fprintf (stderr,
3216 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3217 }
3218
3219 if (lwp->status_pending_p)
3220 {
3221 if (debug_threads)
3222 fprintf (stderr,
3223 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3224 lwpid_of (lwp));
3225 return 0;
3226 }
3227
3228 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3229 or we have. */
3230 pc = get_pc (lwp);
3231
3232 /* If the PC has changed since we stopped, then don't do anything,
3233 and let the breakpoint/tracepoint be hit. This happens if, for
3234 instance, GDB handled the decr_pc_after_break subtraction itself,
3235 GDB is OOL stepping this thread, or the user has issued a "jump"
3236 command, or poked thread's registers herself. */
3237 if (pc != lwp->stop_pc)
3238 {
3239 if (debug_threads)
3240 fprintf (stderr,
3241 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3242 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3243 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3244
3245 lwp->need_step_over = 0;
3246 return 0;
3247 }
3248
3249 saved_inferior = current_inferior;
3250 current_inferior = thread;
3251
3252 /* We can only step over breakpoints we know about. */
3253 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3254 {
3255 /* Don't step over a breakpoint that GDB expects to hit
3256 though. */
3257 if (gdb_breakpoint_here (pc))
3258 {
3259 if (debug_threads)
3260 fprintf (stderr,
3261 "Need step over [LWP %ld]? yes, but found"
3262 " GDB breakpoint at 0x%s; skipping step over\n",
3263 lwpid_of (lwp), paddress (pc));
3264
3265 current_inferior = saved_inferior;
3266 return 0;
3267 }
3268 else
3269 {
3270 if (debug_threads)
3271 fprintf (stderr,
3272 "Need step over [LWP %ld]? yes, "
3273 "found breakpoint at 0x%s\n",
3274 lwpid_of (lwp), paddress (pc));
3275
3276 /* We've found an lwp that needs stepping over --- return 1 so
3277 that find_inferior stops looking. */
3278 current_inferior = saved_inferior;
3279
3280 /* If the step over is cancelled, this is set again. */
3281 lwp->need_step_over = 0;
3282 return 1;
3283 }
3284 }
3285
3286 current_inferior = saved_inferior;
3287
3288 if (debug_threads)
3289 fprintf (stderr,
3290 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3291 lwpid_of (lwp), paddress (pc));
3292
3293 return 0;
3294 }
3295
3296 /* Start a step-over operation on LWP.  When LWP stops at a
3297 breakpoint, we need to get the breakpoint out of the way in order
3298 to make progress.  If we let other threads run while we do that,
3299 they may pass by the breakpoint location and miss hitting it.  To
3300 avoid that, a step-over momentarily stops all threads while LWP is
3301 single-stepped with the breakpoint temporarily uninserted from
3302 the inferior.  When the single-step finishes, we reinsert the
3303 breakpoint, and let all threads that are supposed to be running
3304 run again.
3305
3306 On targets that don't support hardware single-step, we don't
3307 currently support full software single-stepping. Instead, we only
3308 support stepping over the thread event breakpoint, by asking the
3309 low target where to place a reinsert breakpoint. Since this
3310 routine assumes the breakpoint being stepped over is a thread event
3311 breakpoint, it usually takes the return address of the current
3312 function to be a good enough place to set the reinsert breakpoint.  */
3313
3314 static int
3315 start_step_over (struct lwp_info *lwp)
3316 {
3317 struct thread_info *saved_inferior;
3318 CORE_ADDR pc;
3319 int step;
3320
3321 if (debug_threads)
3322 fprintf (stderr,
3323 "Starting step-over on LWP %ld. Stopping all threads\n",
3324 lwpid_of (lwp));
3325
3326 stop_all_lwps (1, lwp);
3327 gdb_assert (lwp->suspended == 0);
3328
3329 if (debug_threads)
3330 fprintf (stderr, "Done stopping all threads for step-over.\n");
3331
3332 /* Note, we should always reach here with an already adjusted PC,
3333 either by GDB (if we're resuming due to GDB's request), or by our
3334 caller, if we just finished handling an internal breakpoint GDB
3335 shouldn't care about. */
3336 pc = get_pc (lwp);
3337
3338 saved_inferior = current_inferior;
3339 current_inferior = get_lwp_thread (lwp);
3340
3341 lwp->bp_reinsert = pc;
3342 uninsert_breakpoints_at (pc);
3343 uninsert_fast_tracepoint_jumps_at (pc);
3344
3345 if (can_hardware_single_step ())
3346 {
3347 step = 1;
3348 }
3349 else
3350 {
3351 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3352 set_reinsert_breakpoint (raddr);
3353 step = 0;
3354 }
3355
3356 current_inferior = saved_inferior;
3357
3358 linux_resume_one_lwp (lwp, step, 0, NULL);
3359
3360 /* Require next event from this LWP. */
3361 step_over_bkpt = lwp->head.id;
3362 return 1;
3363 }
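
/* The step-over started above completes in finish_step_over below:
   linux_wait_1 waits specifically on STEP_OVER_BKPT, and when the
   single-step SIGTRAP (or any other event) arrives for that LWP, it
   reinserts the breakpoint and lets the other threads run again.  */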
3364
3365 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3366 start_step_over, if still there, and delete any reinsert
3367 breakpoints we've set, on targets without hardware single-step.  */
3368
3369 static int
3370 finish_step_over (struct lwp_info *lwp)
3371 {
3372 if (lwp->bp_reinsert != 0)
3373 {
3374 if (debug_threads)
3375 fprintf (stderr, "Finished step over.\n");
3376
3377 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3378 may be no breakpoint to reinsert there by now. */
3379 reinsert_breakpoints_at (lwp->bp_reinsert);
3380 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3381
3382 lwp->bp_reinsert = 0;
3383
3384 /* Delete any software-single-step reinsert breakpoints. No
3385 longer needed. We don't have to worry about other threads
3386 hitting this trap, and later not being able to explain it,
3387 because we were stepping over a breakpoint, and we hold all
3388 threads but LWP stopped while doing that. */
3389 if (!can_hardware_single_step ())
3390 delete_reinsert_breakpoints ();
3391
3392 step_over_bkpt = null_ptid;
3393 return 1;
3394 }
3395 else
3396 return 0;
3397 }
3398
3399 /* This function is called once per thread. We check the thread's resume
3400 request, which will tell us whether to resume, step, or leave the thread
3401 stopped; and what signal, if any, it should be sent.
3402
3403 For threads which we aren't explicitly told otherwise, we preserve
3404 the stepping flag; this is used for stepping over gdbserver-placed
3405 breakpoints.
3406
3407 If pending_flags was set in any thread, we queue any needed
3408 signals, since we won't actually resume. We already have a pending
3409 event to report, so we don't need to preserve any step requests;
3410 they should be re-issued if necessary. */
3411
3412 static int
3413 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3414 {
3415 struct lwp_info *lwp;
3416 struct thread_info *thread;
3417 int step;
3418 int leave_all_stopped = * (int *) arg;
3419 int leave_pending;
3420
3421 thread = (struct thread_info *) entry;
3422 lwp = get_thread_lwp (thread);
3423
3424 if (lwp->resume == NULL)
3425 return 0;
3426
3427 if (lwp->resume->kind == resume_stop)
3428 {
3429 if (debug_threads)
3430 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3431
3432 if (!lwp->stopped)
3433 {
3434 if (debug_threads)
3435 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3436
3437 /* Stop the thread, and wait for the event asynchronously,
3438 through the event loop. */
3439 send_sigstop (lwp);
3440 }
3441 else
3442 {
3443 if (debug_threads)
3444 fprintf (stderr, "already stopped LWP %ld\n",
3445 lwpid_of (lwp));
3446
3447 /* The LWP may have been stopped in an internal event that
3448 was not meant to be notified back to GDB (e.g., gdbserver
3449 breakpoint), so we should be reporting a stop event in
3450 this case too. */
3451
3452 /* If the thread already has a pending SIGSTOP, this is a
3453 no-op. Otherwise, something later will presumably resume
3454 the thread and this will cause it to cancel any pending
3455 operation, due to last_resume_kind == resume_stop. If
3456 the thread already has a pending status to report, we
3457 will still report it the next time we wait - see
3458 status_pending_p_callback. */
3459
3460 /* If we already have a pending signal to report, then
3461 there's no need to queue a SIGSTOP, as this means we're
3462 midway through moving the LWP out of the jumppad, and we
3463 will report the pending signal as soon as that is
3464 finished. */
3465 if (lwp->pending_signals_to_report == NULL)
3466 send_sigstop (lwp);
3467 }
3468
3469 /* For stop requests, we're done. */
3470 lwp->resume = NULL;
3471 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3472 return 0;
3473 }
3474
3475 /* If this thread which is about to be resumed has a pending status,
3476 then don't resume any threads - we can just report the pending
3477 status. Make sure to queue any signals that would otherwise be
3478 sent.  In all-stop mode, we base this decision on whether *any*
3479 thread has a pending status. If there's a thread that needs the
3480 step-over-breakpoint dance, then don't resume any other thread
3481 but that particular one. */
3482 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3483
3484 if (!leave_pending)
3485 {
3486 if (debug_threads)
3487 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3488
3489 step = (lwp->resume->kind == resume_step);
3490 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3491 }
3492 else
3493 {
3494 if (debug_threads)
3495 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3496
3497 /* If we have a new signal, enqueue the signal. */
3498 if (lwp->resume->sig != 0)
3499 {
3500 struct pending_signals *p_sig;
3501 p_sig = xmalloc (sizeof (*p_sig));
3502 p_sig->prev = lwp->pending_signals;
3503 p_sig->signal = lwp->resume->sig;
3504 memset (&p_sig->info, 0, sizeof (siginfo_t));
3505
3506 /* If this is the same signal we were previously stopped by,
3507 make sure to queue its siginfo. We can ignore the return
3508 value of ptrace; if it fails, we'll skip
3509 PTRACE_SETSIGINFO. */
3510 if (WIFSTOPPED (lwp->last_status)
3511 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3512 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3513
3514 lwp->pending_signals = p_sig;
3515 }
3516 }
3517
3518 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3519 lwp->resume = NULL;
3520 return 0;
3521 }
3522
3523 static void
3524 linux_resume (struct thread_resume *resume_info, size_t n)
3525 {
3526 struct thread_resume_array array = { resume_info, n };
3527 struct lwp_info *need_step_over = NULL;
3528 int any_pending;
3529 int leave_all_stopped;
3530
3531 find_inferior (&all_threads, linux_set_resume_request, &array);
3532
3533 /* If there is a thread which would otherwise be resumed, which has
3534 a pending status, then don't resume any threads - we can just
3535 report the pending status. Make sure to queue any signals that
3536 would otherwise be sent. In non-stop mode, we'll apply this
3537 logic to each thread individually. We consume all pending events
3538 before considering starting a step-over (in all-stop).  */
3539 any_pending = 0;
3540 if (!non_stop)
3541 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3542
3543 /* If there is a thread which would otherwise be resumed, which is
3544 stopped at a breakpoint that needs stepping over, then don't
3545 resume any threads - have it step over the breakpoint with all
3546 other threads stopped, then resume all threads again. Make sure
3547 to queue any signals that would otherwise be delivered or
3548 queued. */
3549 if (!any_pending && supports_breakpoints ())
3550 need_step_over
3551 = (struct lwp_info *) find_inferior (&all_lwps,
3552 need_step_over_p, NULL);
3553
3554 leave_all_stopped = (need_step_over != NULL || any_pending);
3555
3556 if (debug_threads)
3557 {
3558 if (need_step_over != NULL)
3559 fprintf (stderr, "Not resuming all, need step over\n");
3560 else if (any_pending)
3561 fprintf (stderr,
3562 "Not resuming, all-stop and found "
3563 "an LWP with pending status\n");
3564 else
3565 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3566 }
3567
3568 /* Even if we're leaving threads stopped, queue all signals we'd
3569 otherwise deliver. */
3570 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3571
3572 if (need_step_over)
3573 start_step_over (need_step_over);
3574 }
3575
3576 /* This function is called once per thread. We check the thread's
3577 last resume request, which will tell us whether to resume, step, or
3578 leave the thread stopped. Any signal the client requested to be
3579 delivered has already been enqueued at this point.
3580
3581 If any thread that GDB wants running is stopped at an internal
3582 breakpoint that needs stepping over, we start a step-over operation
3583 on that particular thread, and leave all others stopped. */
3584
3585 static int
3586 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3587 {
3588 struct lwp_info *lwp = (struct lwp_info *) entry;
3589 struct thread_info *thread;
3590 int step;
3591
3592 if (lwp == except)
3593 return 0;
3594
3595 if (debug_threads)
3596 fprintf (stderr,
3597 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3598
3599 if (!lwp->stopped)
3600 {
3601 if (debug_threads)
3602 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3603 return 0;
3604 }
3605
3606 thread = get_lwp_thread (lwp);
3607
3608 if (thread->last_resume_kind == resume_stop
3609 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3610 {
3611 if (debug_threads)
3612 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3613 lwpid_of (lwp));
3614 return 0;
3615 }
3616
3617 if (lwp->status_pending_p)
3618 {
3619 if (debug_threads)
3620 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3621 lwpid_of (lwp));
3622 return 0;
3623 }
3624
3625 gdb_assert (lwp->suspended >= 0);
3626
3627 if (lwp->suspended)
3628 {
3629 if (debug_threads)
3630 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3631 return 0;
3632 }
3633
3634 if (thread->last_resume_kind == resume_stop
3635 && lwp->pending_signals_to_report == NULL
3636 && lwp->collecting_fast_tracepoint == 0)
3637 {
3638 /* We haven't reported this LWP as stopped yet (otherwise, the
3639 last_status.kind check above would catch it, and we wouldn't
3640 reach here).  This LWP may have been momentarily paused by a
3641 stop_all_lwps call while handling, for example, another LWP's
3642 step-over. In that case, the pending expected SIGSTOP signal
3643 that was queued at vCont;t handling time will have already
3644 been consumed by wait_for_sigstop, and so we need to requeue
3645 another one here. Note that if the LWP already has a SIGSTOP
3646 pending, this is a no-op. */
3647
3648 if (debug_threads)
3649 fprintf (stderr,
3650 "Client wants LWP %ld to stop. "
3651 "Making sure it has a SIGSTOP pending\n",
3652 lwpid_of (lwp));
3653
3654 send_sigstop (lwp);
3655 }
3656
3657 step = thread->last_resume_kind == resume_step;
3658 linux_resume_one_lwp (lwp, step, 0, NULL);
3659 return 0;
3660 }
3661
3662 static int
3663 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3664 {
3665 struct lwp_info *lwp = (struct lwp_info *) entry;
3666
3667 if (lwp == except)
3668 return 0;
3669
3670 lwp->suspended--;
3671 gdb_assert (lwp->suspended >= 0);
3672
3673 return proceed_one_lwp (entry, except);
3674 }
3675
3676 /* When we finish a step-over, set threads running again. If there's
3677 another thread that may need a step-over, now's the time to start
3678 it. Eventually, we'll move all threads past their breakpoints. */
3679
3680 static void
3681 proceed_all_lwps (void)
3682 {
3683 struct lwp_info *need_step_over;
3684
3685 /* If there is a thread which would otherwise be resumed, which is
3686 stopped at a breakpoint that needs stepping over, then don't
3687 resume any threads - have it step over the breakpoint with all
3688 other threads stopped, then resume all threads again. */
3689
3690 if (supports_breakpoints ())
3691 {
3692 need_step_over
3693 = (struct lwp_info *) find_inferior (&all_lwps,
3694 need_step_over_p, NULL);
3695
3696 if (need_step_over != NULL)
3697 {
3698 if (debug_threads)
3699 fprintf (stderr, "proceed_all_lwps: found "
3700 "thread %ld needing a step-over\n",
3701 lwpid_of (need_step_over));
3702
3703 start_step_over (need_step_over);
3704 return;
3705 }
3706 }
3707
3708 if (debug_threads)
3709 fprintf (stderr, "Proceeding, no step-over needed\n");
3710
3711 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3712 }
3713
3714 /* Stopped LWPs that the client wanted to be running, and that don't
3715 have pending statuses, are set to run again, except for EXCEPT, if
3716 not NULL.  This undoes a stop_all_lwps call.  */
3717
3718 static void
3719 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3720 {
3721 if (debug_threads)
3722 {
3723 if (except)
3724 fprintf (stderr,
3725 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3726 else
3727 fprintf (stderr,
3728 "unstopping all lwps\n");
3729 }
3730
3731 if (unsuspend)
3732 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3733 else
3734 find_inferior (&all_lwps, proceed_one_lwp, except);
3735 }
3736
3737 #ifdef HAVE_LINUX_USRREGS
3738
3739 int
3740 register_addr (int regnum)
3741 {
3742 int addr;
3743
3744 if (regnum < 0 || regnum >= the_low_target.num_regs)
3745 error ("Invalid register number %d.", regnum);
3746
3747 addr = the_low_target.regmap[regnum];
3748
3749 return addr;
3750 }
3751
3752 /* Fetch one register. */
3753 static void
3754 fetch_register (struct regcache *regcache, int regno)
3755 {
3756 CORE_ADDR regaddr;
3757 int i, size;
3758 char *buf;
3759 int pid;
3760
3761 if (regno >= the_low_target.num_regs)
3762 return;
3763 if ((*the_low_target.cannot_fetch_register) (regno))
3764 return;
3765
3766 regaddr = register_addr (regno);
3767 if (regaddr == -1)
3768 return;
3769
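/* Round the register size up to a whole number of ptrace transfer
words; PTRACE_PEEKUSER always moves sizeof (PTRACE_XFER_TYPE) bytes
at a time. */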
3770 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3771 & -sizeof (PTRACE_XFER_TYPE));
3772 buf = alloca (size);
3773
3774 pid = lwpid_of (get_thread_lwp (current_inferior));
3775 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3776 {
3777 errno = 0;
3778 *(PTRACE_XFER_TYPE *) (buf + i) =
3779 ptrace (PTRACE_PEEKUSER, pid,
3780 /* Coerce to a uintptr_t first to avoid potential gcc warning
3781 of coercing an 8 byte integer to a 4 byte pointer. */
3782 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
3783 regaddr += sizeof (PTRACE_XFER_TYPE);
3784 if (errno != 0)
3785 error ("reading register %d: %s", regno, strerror (errno));
3786 }
3787
3788 if (the_low_target.supply_ptrace_register)
3789 the_low_target.supply_ptrace_register (regcache, regno, buf);
3790 else
3791 supply_register (regcache, regno, buf);
3792 }
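/* A worked instance of the size rounding above, assuming an 8-byte
   PTRACE_XFER_TYPE: a 10-byte register yields
   size = (10 + 8 - 1) & -8 = 17 & ~7 = 16, so two full ptrace words
   are transferred for it and the trailing bytes are just padding.  */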
3793
3794 /* Store one register. */
3795 static void
3796 store_register (struct regcache *regcache, int regno)
3797 {
3798 CORE_ADDR regaddr;
3799 int i, size;
3800 char *buf;
3801 int pid;
3802
3803 if (regno >= the_low_target.num_regs)
3804 return;
3805 if ((*the_low_target.cannot_store_register) (regno))
3806 return;
3807
3808 regaddr = register_addr (regno);
3809 if (regaddr == -1)
3810 return;
3811
3812 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3813 & -sizeof (PTRACE_XFER_TYPE));
3814 buf = alloca (size);
3815 memset (buf, 0, size);
3816
3817 if (the_low_target.collect_ptrace_register)
3818 the_low_target.collect_ptrace_register (regcache, regno, buf);
3819 else
3820 collect_register (regcache, regno, buf);
3821
3822 pid = lwpid_of (get_thread_lwp (current_inferior));
3823 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3824 {
3825 errno = 0;
3826 ptrace (PTRACE_POKEUSER, pid,
3827 /* Coerce to a uintptr_t first to avoid potential gcc warning
3828 about coercing an 8 byte integer to a 4 byte pointer. */
3829 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3830 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3831 if (errno != 0)
3832 {
3833 /* At this point, ESRCH should mean the process is
3834 already gone, in which case we simply ignore attempts
3835 to change its registers. See also the related
3836 comment in linux_resume_one_lwp. */
3837 if (errno == ESRCH)
3838 return;
3839
3840 if ((*the_low_target.cannot_store_register) (regno) == 0)
3841 error ("writing register %d: %s", regno, strerror (errno));
3842 }
3843 regaddr += sizeof (PTRACE_XFER_TYPE);
3844 }
3845 }
3846
3847 /* Fetch all registers, or just one, from the child process. */
3848 static void
3849 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3850 {
3851 if (regno == -1)
3852 for (regno = 0; regno < the_low_target.num_regs; regno++)
3853 fetch_register (regcache, regno);
3854 else
3855 fetch_register (regcache, regno);
3856 }
3857
3858 /* Store our register values back into the inferior.
3859 If REGNO is -1, do this for all registers.
3860 Otherwise, REGNO specifies which register (so we can save time). */
3861 static void
3862 usr_store_inferior_registers (struct regcache *regcache, int regno)
3863 {
3864 if (regno == -1)
3865 for (regno = 0; regno < the_low_target.num_regs; regno++)
3866 store_register (regcache, regno);
3867 else
3868 store_register (regcache, regno);
3869 }
3870 #endif /* HAVE_LINUX_USRREGS */
3871
3872
3873
3874 #ifdef HAVE_LINUX_REGSETS
3875
3876 static int
3877 regsets_fetch_inferior_registers (struct regcache *regcache)
3878 {
3879 struct regset_info *regset;
3880 int saw_general_regs = 0;
3881 int pid;
3882 struct iovec iov;
3883
3884 regset = target_regsets;
3885
3886 pid = lwpid_of (get_thread_lwp (current_inferior));
3887 while (regset->size >= 0)
3888 {
3889 void *buf, *data;
3890 int nt_type, res;
3891
3892 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3893 {
3894 regset ++;
3895 continue;
3896 }
3897
3898 buf = xmalloc (regset->size);
3899
3900 nt_type = regset->nt_type;
3901 if (nt_type)
3902 {
3903 iov.iov_base = buf;
3904 iov.iov_len = regset->size;
3905 data = (void *) &iov;
3906 }
3907 else
3908 data = buf;
3909
3910 #ifndef __sparc__
3911 res = ptrace (regset->get_request, pid, nt_type, data);
3912 #else
3913 res = ptrace (regset->get_request, pid, data, nt_type);
3914 #endif
3915 if (res < 0)
3916 {
3917 if (errno == EIO)
3918 {
3919 /* If we get EIO on a regset, do not try it again for
3920 this process. */
3921 disabled_regsets[regset - target_regsets] = 1;
3922 free (buf);
3923 continue;
3924 }
3925 else
3926 {
3927 char s[256];
3928 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3929 pid);
3930 perror (s);
3931 }
3932 }
3933 else if (regset->type == GENERAL_REGS)
3934 saw_general_regs = 1;
3935 regset->store_function (regcache, buf);
3936 regset ++;
3937 free (buf);
3938 }
3939 if (saw_general_regs)
3940 return 0;
3941 else
3942 return 1;
3943 }
3944
3945 static int
3946 regsets_store_inferior_registers (struct regcache *regcache)
3947 {
3948 struct regset_info *regset;
3949 int saw_general_regs = 0;
3950 int pid;
3951 struct iovec iov;
3952
3953 regset = target_regsets;
3954
3955 pid = lwpid_of (get_thread_lwp (current_inferior));
3956 while (regset->size >= 0)
3957 {
3958 void *buf, *data;
3959 int nt_type, res;
3960
3961 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3962 {
3963 regset ++;
3964 continue;
3965 }
3966
3967 buf = xmalloc (regset->size);
3968
3969 /* First fill the buffer with the current register set contents,
3970 in case there are any items in the kernel's regset that are
3971 not in gdbserver's regcache. */
3972
3973 nt_type = regset->nt_type;
3974 if (nt_type)
3975 {
3976 iov.iov_base = buf;
3977 iov.iov_len = regset->size;
3978 data = (void *) &iov;
3979 }
3980 else
3981 data = buf;
3982
3983 #ifndef __sparc__
3984 res = ptrace (regset->get_request, pid, nt_type, data);
3985 #else
3986       res = ptrace (regset->get_request, pid, data, nt_type);
3987 #endif
3988
3989 if (res == 0)
3990 {
3991 /* Then overlay our cached registers on that. */
3992 regset->fill_function (regcache, buf);
3993
3994 /* Only now do we write the register set. */
3995 #ifndef __sparc__
3996 res = ptrace (regset->set_request, pid, nt_type, data);
3997 #else
3998 res = ptrace (regset->set_request, pid, data, nt_type);
3999 #endif
4000 }
4001
4002 if (res < 0)
4003 {
4004 if (errno == EIO)
4005 {
4006 /* If we get EIO on a regset, do not try it again for
4007 this process. */
4008 disabled_regsets[regset - target_regsets] = 1;
4009 free (buf);
4010 continue;
4011 }
4012 else if (errno == ESRCH)
4013 {
4014 /* At this point, ESRCH should mean the process is
4015 already gone, in which case we simply ignore attempts
4016 to change its registers. See also the related
4017 comment in linux_resume_one_lwp. */
4018 free (buf);
4019 return 0;
4020 }
4021 else
4022 {
4023 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4024 }
4025 }
4026 else if (regset->type == GENERAL_REGS)
4027 saw_general_regs = 1;
4028 regset ++;
4029 free (buf);
4030 }
4031 if (saw_general_regs)
4032 return 0;
4033 else
4034 return 1;
4036 }
4037
4038 #endif /* HAVE_LINUX_REGSETS */
4039
4040
4041 void
4042 linux_fetch_registers (struct regcache *regcache, int regno)
4043 {
4044 #ifdef HAVE_LINUX_REGSETS
4045 if (regsets_fetch_inferior_registers (regcache) == 0)
4046 return;
4047 #endif
4048 #ifdef HAVE_LINUX_USRREGS
4049 usr_fetch_inferior_registers (regcache, regno);
4050 #endif
4051 }
4052
4053 void
4054 linux_store_registers (struct regcache *regcache, int regno)
4055 {
4056 #ifdef HAVE_LINUX_REGSETS
4057 if (regsets_store_inferior_registers (regcache) == 0)
4058 return;
4059 #endif
4060 #ifdef HAVE_LINUX_USRREGS
4061 usr_store_inferior_registers (regcache, regno);
4062 #endif
4063 }
4064
4065
4066 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4067 to debugger memory starting at MYADDR. */
4068
4069 static int
4070 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4071 {
4072 register int i;
4073 /* Round starting address down to longword boundary. */
4074 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4075 /* Round ending address up; get number of longwords that makes. */
4076 register int count
4077 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4078 / sizeof (PTRACE_XFER_TYPE);
4079 /* Allocate buffer of that many longwords. */
4080 register PTRACE_XFER_TYPE *buffer
4081 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4082 int fd;
4083 char filename[64];
4084 int pid = lwpid_of (get_thread_lwp (current_inferior));
4085
4086 /* Try using /proc. Don't bother for one word. */
4087 if (len >= 3 * sizeof (long))
4088 {
4089 /* We could keep this file open and cache it - possibly one per
4090 thread. That requires some juggling, but is even faster. */
4091 sprintf (filename, "/proc/%d/mem", pid);
4092 fd = open (filename, O_RDONLY | O_LARGEFILE);
4093 if (fd == -1)
4094 goto no_proc;
4095
4096 /* If pread64 is available, use it. It's faster if the kernel
4097 supports it (only one syscall), and it's 64-bit safe even on
4098 32-bit platforms (for instance, SPARC debugging a SPARC64
4099 application). */
4100 #ifdef HAVE_PREAD64
4101 if (pread64 (fd, myaddr, len, memaddr) != len)
4102 #else
4103 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4104 #endif
4105 {
4106 close (fd);
4107 goto no_proc;
4108 }
4109
4110 close (fd);
4111 return 0;
4112 }
4113
4114 no_proc:
4115 /* Read all the longwords */
4116 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4117 {
4118 errno = 0;
4119 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4120 about coercing an 8 byte integer to a 4 byte pointer. */
4121 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4122 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4123 if (errno)
4124 return errno;
4125 }
4126
4127 /* Copy appropriate bytes out of the buffer. */
4128 memcpy (myaddr,
4129 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4130 len);
4131
4132 return 0;
4133 }
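/* A minimal standalone sketch of the /proc/PID/mem technique used
   above.  It assumes PID is already ptrace-attached and stopped,
   omits the word-by-word PTRACE_PEEKTEXT fallback, and uses plain
   pread in place of pread64 for brevity.  Illustrative only, not
   part of gdbserver proper.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
read_inferior_mem (int pid, unsigned long addr, void *out, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A single pread transfers the whole range in one syscall.  */
  n = pread (fd, out, len, (off_t) addr);
  close (fd);
  return n == (ssize_t) len ? 0 : -1;
}
#endif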
4134
4135 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4136 memory at MEMADDR. On failure (cannot write to the inferior)
4137 returns the value of errno. */
4138
4139 static int
4140 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4141 {
4142 register int i;
4143 /* Round starting address down to longword boundary. */
4144 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4145 /* Round ending address up; get number of longwords that makes. */
4146 register int count
4147 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4148 / sizeof (PTRACE_XFER_TYPE);
4149
4150 /* Allocate buffer of that many longwords. */
4151 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4152 alloca (count * sizeof (PTRACE_XFER_TYPE));
4153
4154 int pid = lwpid_of (get_thread_lwp (current_inferior));
4155
4156 if (debug_threads)
4157 {
4158 /* Dump up to four bytes. */
4159 unsigned int val = * (unsigned int *) myaddr;
4160 if (len == 1)
4161 val = val & 0xff;
4162 else if (len == 2)
4163 val = val & 0xffff;
4164 else if (len == 3)
4165 val = val & 0xffffff;
4166 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4167 val, (long)memaddr);
4168 }
4169
4170 /* Fill start and end extra bytes of buffer with existing memory data. */
4171
4172 errno = 0;
4173 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4174 about coercing an 8 byte integer to a 4 byte pointer. */
4175 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4176 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4177 if (errno)
4178 return errno;
4179
4180 if (count > 1)
4181 {
4182 errno = 0;
4183 buffer[count - 1]
4184 = ptrace (PTRACE_PEEKTEXT, pid,
4185 /* Coerce to a uintptr_t first to avoid potential gcc warning
4186 about coercing an 8 byte integer to a 4 byte pointer. */
4187 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4188 * sizeof (PTRACE_XFER_TYPE)),
4189 0);
4190 if (errno)
4191 return errno;
4192 }
4193
4194 /* Copy data to be written over corresponding part of buffer. */
4195
4196 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4197 myaddr, len);
4198
4199 /* Write the entire buffer. */
4200
4201 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4202 {
4203 errno = 0;
4204 ptrace (PTRACE_POKETEXT, pid,
4205 /* Coerce to a uintptr_t first to avoid potential gcc warning
4206 about coercing an 8 byte integer to a 4 byte pointer. */
4207 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4208 (PTRACE_ARG4_TYPE) buffer[i]);
4209 if (errno)
4210 return errno;
4211 }
4212
4213 return 0;
4214 }
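/* A worked instance of the alignment math above, assuming an 8-byte
   PTRACE_XFER_TYPE: writing LEN = 3 bytes at MEMADDR = 0x1006 gives

     addr  = 0x1006 & -8                  = 0x1000
     count = ((0x1009 - 0x1000) + 7) / 8  = 2

   so words 0x1000 and 0x1008 are peeked, the three bytes are spliced
   in starting at offset 0x1006 & 7 = 6, and both words are poked
   back.  The surrounding bytes are preserved by construction.  */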
4215
4216 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4217 static int linux_supports_tracefork_flag;
4218
4219 static void
4220 linux_enable_event_reporting (int pid)
4221 {
4222 if (!linux_supports_tracefork_flag)
4223 return;
4224
4225 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4226 }
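/* Sketch of the receiving side this option enables: once
   PTRACE_O_TRACECLONE is set, a clone in the tracee surfaces as a
   ptrace stop whose high status bits carry PTRACE_EVENT_CLONE, and
   PTRACE_GETEVENTMSG yields the new LWP's id.  Illustrative only;
   assumes PTRACE_EVENT_CLONE and __WALL are visible from the system
   headers.  */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>

static long
wait_for_new_lwp (int pid)
{
  int status;
  unsigned long new_lwp = 0;

  waitpid (pid, &status, __WALL);
  if (WIFSTOPPED (status) && (status >> 16) == PTRACE_EVENT_CLONE)
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwp);
  return (long) new_lwp;
}
#endif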
4227
4228 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4229
4230 static int
4231 linux_tracefork_grandchild (void *arg)
4232 {
4233 _exit (0);
4234 }
4235
4236 #define STACK_SIZE 4096
4237
4238 static int
4239 linux_tracefork_child (void *arg)
4240 {
4241 ptrace (PTRACE_TRACEME, 0, 0, 0);
4242 kill (getpid (), SIGSTOP);
4243
4244 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4245
4246 if (fork () == 0)
4247 linux_tracefork_grandchild (NULL);
4248
4249 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4250
4251 #ifdef __ia64__
4252 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4253 CLONE_VM | SIGCHLD, NULL);
4254 #else
4255 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4256 CLONE_VM | SIGCHLD, NULL);
4257 #endif
4258
4259 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4260
4261 _exit (0);
4262 }
4263
4264 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4265 sure that we can enable the option, and that it had the desired
4266 effect. */
4267
4268 static void
4269 linux_test_for_tracefork (void)
4270 {
4271 int child_pid, ret, status;
4272 long second_pid;
4273 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4274 char *stack = xmalloc (STACK_SIZE * 4);
4275 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4276
4277 linux_supports_tracefork_flag = 0;
4278
4279 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4280
4281 child_pid = fork ();
4282 if (child_pid == 0)
4283 linux_tracefork_child (NULL);
4284
4285 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4286
4287 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4288 #ifdef __ia64__
4289 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4290 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4291 #else /* !__ia64__ */
4292 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4293 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4294 #endif /* !__ia64__ */
4295
4296 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4297
4298 if (child_pid == -1)
4299 perror_with_name ("clone");
4300
4301 ret = my_waitpid (child_pid, &status, 0);
4302 if (ret == -1)
4303 perror_with_name ("waitpid");
4304 else if (ret != child_pid)
4305 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4306 if (! WIFSTOPPED (status))
4307 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4308
4309 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4310 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4311 if (ret != 0)
4312 {
4313 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4314 if (ret != 0)
4315 {
4316 warning ("linux_test_for_tracefork: failed to kill child");
4317 return;
4318 }
4319
4320 ret = my_waitpid (child_pid, &status, 0);
4321 if (ret != child_pid)
4322 warning ("linux_test_for_tracefork: failed to wait for killed child");
4323 else if (!WIFSIGNALED (status))
4324 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4325 "killed child", status);
4326
4327 return;
4328 }
4329
4330 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4331 if (ret != 0)
4332 warning ("linux_test_for_tracefork: failed to resume child");
4333
4334 ret = my_waitpid (child_pid, &status, 0);
4335
4336 if (ret == child_pid && WIFSTOPPED (status)
4337 && status >> 16 == PTRACE_EVENT_FORK)
4338 {
4339 second_pid = 0;
4340 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4341 if (ret == 0 && second_pid != 0)
4342 {
4343 int second_status;
4344
4345 linux_supports_tracefork_flag = 1;
4346 my_waitpid (second_pid, &second_status, 0);
4347 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4348 if (ret != 0)
4349 warning ("linux_test_for_tracefork: failed to kill second child");
4350 my_waitpid (second_pid, &status, 0);
4351 }
4352 }
4353 else
4354 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4355 "(%d, status 0x%x)", ret, status);
4356
4357 do
4358 {
4359 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4360 if (ret != 0)
4361 warning ("linux_test_for_tracefork: failed to kill child");
4362 my_waitpid (child_pid, &status, 0);
4363 }
4364 while (WIFSTOPPED (status));
4365
4366 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4367 free (stack);
4368 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4369 }
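/* The probe above, condensed to its essence for MMU hosts.  A sketch
   only: it assumes PTRACE_EVENT_FORK is visible from <sys/ptrace.h>
   and leaves the cleanup of the test children to the reader.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static int
kernel_traces_forks (void)
{
  int status;
  pid_t pid = fork ();

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      kill (getpid (), SIGSTOP);
      fork ();
      _exit (0);
    }

  waitpid (pid, &status, 0);	/* The self-inflicted SIGSTOP.  */
  if (ptrace (PTRACE_SETOPTIONS, pid, 0,
	      (void *) PTRACE_O_TRACEFORK) != 0)
    return 0;			/* Kernel rejects the option.  */

  ptrace (PTRACE_CONT, pid, 0, 0);
  waitpid (pid, &status, 0);
  return WIFSTOPPED (status) && (status >> 16) == PTRACE_EVENT_FORK;
}
#endif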
4370
4371
4372 static void
4373 linux_look_up_symbols (void)
4374 {
4375 #ifdef USE_THREAD_DB
4376 struct process_info *proc = current_process ();
4377
4378 if (proc->private->thread_db != NULL)
4379 return;
4380
4381 /* If the kernel supports tracing forks then it also supports tracing
4382 clones, and then we don't need to use the magic thread event breakpoint
4383 to learn about threads. */
4384 thread_db_init (!linux_supports_tracefork_flag);
4385 #endif
4386 }
4387
4388 static void
4389 linux_request_interrupt (void)
4390 {
4391 extern unsigned long signal_pid;
4392
4393 if (!ptid_equal (cont_thread, null_ptid)
4394 && !ptid_equal (cont_thread, minus_one_ptid))
4395 {
4396 struct lwp_info *lwp;
4397 int lwpid;
4398
4399 lwp = get_thread_lwp (current_inferior);
4400 lwpid = lwpid_of (lwp);
4401 kill_lwp (lwpid, SIGINT);
4402 }
4403 else
4404 kill_lwp (signal_pid, SIGINT);
4405 }
4406
4407 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4408 to debugger memory starting at MYADDR. */
4409
4410 static int
4411 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4412 {
4413 char filename[PATH_MAX];
4414 int fd, n;
4415 int pid = lwpid_of (get_thread_lwp (current_inferior));
4416
4417 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4418
4419 fd = open (filename, O_RDONLY);
4420 if (fd < 0)
4421 return -1;
4422
4423 if (offset != (CORE_ADDR) 0
4424 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4425 n = -1;
4426 else
4427 n = read (fd, myaddr, len);
4428
4429 close (fd);
4430
4431 return n;
4432 }
4433
4434 /* These breakpoint and watchpoint related wrapper functions simply
4435 pass on the function call if the target has registered a
4436 corresponding function. */
4437
4438 static int
4439 linux_insert_point (char type, CORE_ADDR addr, int len)
4440 {
4441 if (the_low_target.insert_point != NULL)
4442 return the_low_target.insert_point (type, addr, len);
4443 else
4444 /* Unsupported (see target.h). */
4445 return 1;
4446 }
4447
4448 static int
4449 linux_remove_point (char type, CORE_ADDR addr, int len)
4450 {
4451 if (the_low_target.remove_point != NULL)
4452 return the_low_target.remove_point (type, addr, len);
4453 else
4454 /* Unsupported (see target.h). */
4455 return 1;
4456 }
4457
4458 static int
4459 linux_stopped_by_watchpoint (void)
4460 {
4461 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4462
4463 return lwp->stopped_by_watchpoint;
4464 }
4465
4466 static CORE_ADDR
4467 linux_stopped_data_address (void)
4468 {
4469 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4470
4471 return lwp->stopped_data_address;
4472 }
4473
4474 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4475 #if defined(__mcoldfire__)
4476 /* These should really be defined in the kernel's ptrace.h header. */
4477 #define PT_TEXT_ADDR 49*4
4478 #define PT_DATA_ADDR 50*4
4479 #define PT_TEXT_END_ADDR 51*4
4480 #elif defined(BFIN)
4481 #define PT_TEXT_ADDR 220
4482 #define PT_TEXT_END_ADDR 224
4483 #define PT_DATA_ADDR 228
4484 #elif defined(__TMS320C6X__)
4485 #define PT_TEXT_ADDR (0x10000*4)
4486 #define PT_DATA_ADDR (0x10004*4)
4487 #define PT_TEXT_END_ADDR (0x10008*4)
4488 #endif
4489
4490 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4491 to tell gdb about. */
4492
4493 static int
4494 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4495 {
4496 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4497 unsigned long text, text_end, data;
4498 int pid = lwpid_of (get_thread_lwp (current_inferior));
4499
4500 errno = 0;
4501
4502 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4503 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4504 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4505
4506 if (errno == 0)
4507 {
4508 /* Both text and data offsets produced at compile-time (and so
4509 used by gdb) are relative to the beginning of the program,
4510 with the data segment immediately following the text segment.
4511 However, the actual runtime layout in memory may put the data
4512 somewhere else, so when we send gdb a data base-address, we
4513 use the real data base address and subtract the compile-time
4514 data base-address from it (which is just the length of the
4515 text segment). BSS immediately follows data in both
4516 cases. */
4517 *text_p = text;
4518 *data_p = data - (text_end - text);
4519
4520 return 1;
4521 }
4522 #endif
4523 return 0;
4524 }
4525 #endif
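/* Worked example of the offset math above: with text = 0x8000,
   text_end = 0xa000 and data = 0x10000 as reported by the kernel,
   the text segment is 0x2000 bytes long, so gdb receives
   *text_p = 0x8000 and *data_p = 0x10000 - 0x2000 = 0xe000.  A
   compile-time data address 0x2000 + x then relocates to
   0x2000 + x + 0xe000 = 0x10000 + x, its real runtime location.  */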
4526
4527 static int
4528 linux_qxfer_osdata (const char *annex,
4529 unsigned char *readbuf, unsigned const char *writebuf,
4530 CORE_ADDR offset, int len)
4531 {
4532 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4533 }
4534
4535 /* Convert a native/host siginfo object, into/from the siginfo in the
4536 layout of the inferiors' architecture. */
4537
4538 static void
4539 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4540 {
4541 int done = 0;
4542
4543 if (the_low_target.siginfo_fixup != NULL)
4544 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4545
4546 /* If there was no callback, or the callback didn't do anything,
4547 then just do a straight memcpy. */
4548 if (!done)
4549 {
4550 if (direction == 1)
4551 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4552 else
4553 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4554 }
4555 }
4556
4557 static int
4558 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4559 unsigned const char *writebuf, CORE_ADDR offset, int len)
4560 {
4561 int pid;
4562 struct siginfo siginfo;
4563 char inf_siginfo[sizeof (struct siginfo)];
4564
4565 if (current_inferior == NULL)
4566 return -1;
4567
4568 pid = lwpid_of (get_thread_lwp (current_inferior));
4569
4570 if (debug_threads)
4571 fprintf (stderr, "%s siginfo for lwp %d.\n",
4572 readbuf != NULL ? "Reading" : "Writing",
4573 pid);
4574
4575 if (offset >= sizeof (siginfo))
4576 return -1;
4577
4578 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4579 return -1;
4580
4581 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4582 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4583 inferior with a 64-bit GDBSERVER should look the same as debugging it
4584 with a 32-bit GDBSERVER, we need to convert it. */
4585 siginfo_fixup (&siginfo, inf_siginfo, 0);
4586
4587 if (offset + len > sizeof (siginfo))
4588 len = sizeof (siginfo) - offset;
4589
4590 if (readbuf != NULL)
4591 memcpy (readbuf, inf_siginfo + offset, len);
4592 else
4593 {
4594 memcpy (inf_siginfo + offset, writebuf, len);
4595
4596 /* Convert back to ptrace layout before flushing it out. */
4597 siginfo_fixup (&siginfo, inf_siginfo, 1);
4598
4599 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4600 return -1;
4601 }
4602
4603 return len;
4604 }
4605
4606 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
4607    it notifies us when children change state, and it acts as the
4608    handler for the sigsuspend in my_waitpid.  */
4609
4610 static void
4611 sigchld_handler (int signo)
4612 {
4613 int old_errno = errno;
4614
4615 if (debug_threads)
4616 {
4617 do
4618 {
4619 /* fprintf is not async-signal-safe, so call write
4620 directly. */
4621 if (write (2, "sigchld_handler\n",
4622 sizeof ("sigchld_handler\n") - 1) < 0)
4623 break; /* just ignore */
4624 } while (0);
4625 }
4626
4627 if (target_is_async_p ())
4628 async_file_mark (); /* trigger a linux_wait */
4629
4630 errno = old_errno;
4631 }
4632
4633 static int
4634 linux_supports_non_stop (void)
4635 {
4636 return 1;
4637 }
4638
4639 static int
4640 linux_async (int enable)
4641 {
4642 int previous = (linux_event_pipe[0] != -1);
4643
4644 if (debug_threads)
4645 fprintf (stderr, "linux_async (%d), previous=%d\n",
4646 enable, previous);
4647
4648 if (previous != enable)
4649 {
4650 sigset_t mask;
4651 sigemptyset (&mask);
4652 sigaddset (&mask, SIGCHLD);
4653
4654 sigprocmask (SIG_BLOCK, &mask, NULL);
4655
4656 if (enable)
4657 {
4658 if (pipe (linux_event_pipe) == -1)
4659 fatal ("creating event pipe failed.");
4660
4661 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4662 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4663
4664 /* Register the event loop handler. */
4665 add_file_handler (linux_event_pipe[0],
4666 handle_target_event, NULL);
4667
4668 /* Always trigger a linux_wait. */
4669 async_file_mark ();
4670 }
4671 else
4672 {
4673 delete_file_handler (linux_event_pipe[0]);
4674
4675 close (linux_event_pipe[0]);
4676 close (linux_event_pipe[1]);
4677 linux_event_pipe[0] = -1;
4678 linux_event_pipe[1] = -1;
4679 }
4680
4681 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4682 }
4683
4684 return previous;
4685 }
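/* The event pipe above is an instance of the classic self-pipe
   trick.  A stripped-down sketch with hypothetical names; the real
   wiring through gdbserver's event loop is as shown above.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];

static void
selfpipe_sigchld (int signo)
{
  int saved_errno = errno;

  /* write is async-signal-safe; making the pipe readable wakes up
     whatever select/poll loop watches event_pipe[0].  */
  if (write (event_pipe[1], "+", 1) < 0)
    ;				/* Pipe full: a wakeup is already pending.  */
  errno = saved_errno;
}

static void
selfpipe_setup (void)
{
  pipe (event_pipe);
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, selfpipe_sigchld);
}
#endif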
4686
4687 static int
4688 linux_start_non_stop (int nonstop)
4689 {
4690 /* Register or unregister from event-loop accordingly. */
4691 linux_async (nonstop);
4692 return 0;
4693 }
4694
4695 static int
4696 linux_supports_multi_process (void)
4697 {
4698 return 1;
4699 }
4700
4701 static int
4702 linux_supports_disable_randomization (void)
4703 {
4704 #ifdef HAVE_PERSONALITY
4705 return 1;
4706 #else
4707 return 0;
4708 #endif
4709 }
4710
4711 /* Enumerate spufs IDs for process PID. */
4712 static int
4713 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4714 {
4715 int pos = 0;
4716 int written = 0;
4717 char path[128];
4718 DIR *dir;
4719 struct dirent *entry;
4720
4721 sprintf (path, "/proc/%ld/fd", pid);
4722 dir = opendir (path);
4723 if (!dir)
4724 return -1;
4725
4726 rewinddir (dir);
4727 while ((entry = readdir (dir)) != NULL)
4728 {
4729 struct stat st;
4730 struct statfs stfs;
4731 int fd;
4732
4733 fd = atoi (entry->d_name);
4734 if (!fd)
4735 continue;
4736
4737 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4738 if (stat (path, &st) != 0)
4739 continue;
4740 if (!S_ISDIR (st.st_mode))
4741 continue;
4742
4743 if (statfs (path, &stfs) != 0)
4744 continue;
4745 if (stfs.f_type != SPUFS_MAGIC)
4746 continue;
4747
4748 if (pos >= offset && pos + 4 <= offset + len)
4749 {
4750 *(unsigned int *)(buf + pos - offset) = fd;
4751 written += 4;
4752 }
4753 pos += 4;
4754 }
4755
4756 closedir (dir);
4757 return written;
4758 }
4759
4760 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4761 object type, using the /proc file system. */
4762 static int
4763 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4764 unsigned const char *writebuf,
4765 CORE_ADDR offset, int len)
4766 {
4767 long pid = lwpid_of (get_thread_lwp (current_inferior));
4768 char buf[128];
4769 int fd = 0;
4770 int ret = 0;
4771
4772 if (!writebuf && !readbuf)
4773 return -1;
4774
4775 if (!*annex)
4776 {
4777 if (!readbuf)
4778 return -1;
4779 else
4780 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4781 }
4782
4783 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4784 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4785 if (fd <= 0)
4786 return -1;
4787
4788 if (offset != 0
4789 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4790 {
4791 close (fd);
4792 return 0;
4793 }
4794
4795 if (writebuf)
4796 ret = write (fd, writebuf, (size_t) len);
4797 else
4798 ret = read (fd, readbuf, (size_t) len);
4799
4800 close (fd);
4801 return ret;
4802 }
4803
4804 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4805 struct target_loadseg
4806 {
4807 /* Core address to which the segment is mapped. */
4808 Elf32_Addr addr;
4809 /* VMA recorded in the program header. */
4810 Elf32_Addr p_vaddr;
4811 /* Size of this segment in memory. */
4812 Elf32_Word p_memsz;
4813 };
4814
4815 # if defined PT_GETDSBT
4816 struct target_loadmap
4817 {
4818 /* Protocol version number, must be zero. */
4819 Elf32_Word version;
4820 /* Pointer to the DSBT table, its size, and the DSBT index. */
4821 unsigned *dsbt_table;
4822 unsigned dsbt_size, dsbt_index;
4823 /* Number of segments in this map. */
4824 Elf32_Word nsegs;
4825 /* The actual memory map. */
4826 struct target_loadseg segs[/*nsegs*/];
4827 };
4828 # define LINUX_LOADMAP PT_GETDSBT
4829 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4830 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4831 # else
4832 struct target_loadmap
4833 {
4834 /* Protocol version number, must be zero. */
4835 Elf32_Half version;
4836 /* Number of segments in this map. */
4837 Elf32_Half nsegs;
4838 /* The actual memory map. */
4839 struct target_loadseg segs[/*nsegs*/];
4840 };
4841 # define LINUX_LOADMAP PTRACE_GETFDPIC
4842 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4843 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4844 # endif
4845
4846 static int
4847 linux_read_loadmap (const char *annex, CORE_ADDR offset,
4848 unsigned char *myaddr, unsigned int len)
4849 {
4850 int pid = lwpid_of (get_thread_lwp (current_inferior));
4851 int addr = -1;
4852 struct target_loadmap *data = NULL;
4853 unsigned int actual_length, copy_length;
4854
4855 if (strcmp (annex, "exec") == 0)
4856 addr = (int) LINUX_LOADMAP_EXEC;
4857 else if (strcmp (annex, "interp") == 0)
4858 addr = (int) LINUX_LOADMAP_INTERP;
4859 else
4860 return -1;
4861
4862 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
4863 return -1;
4864
4865 if (data == NULL)
4866 return -1;
4867
4868 actual_length = sizeof (struct target_loadmap)
4869 + sizeof (struct target_loadseg) * data->nsegs;
4870
4871 if (offset < 0 || offset > actual_length)
4872 return -1;
4873
4874 copy_length = actual_length - offset < len ? actual_length - offset : len;
4875 memcpy (myaddr, (char *) data + offset, copy_length);
4876 return copy_length;
4877 }
4878 #else
4879 # define linux_read_loadmap NULL
4880 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
4881
4882 static void
4883 linux_process_qsupported (const char *query)
4884 {
4885 if (the_low_target.process_qsupported != NULL)
4886 the_low_target.process_qsupported (query);
4887 }
4888
4889 static int
4890 linux_supports_tracepoints (void)
4891 {
4892 if (*the_low_target.supports_tracepoints == NULL)
4893 return 0;
4894
4895 return (*the_low_target.supports_tracepoints) ();
4896 }
4897
4898 static CORE_ADDR
4899 linux_read_pc (struct regcache *regcache)
4900 {
4901 if (the_low_target.get_pc == NULL)
4902 return 0;
4903
4904 return (*the_low_target.get_pc) (regcache);
4905 }
4906
4907 static void
4908 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4909 {
4910 gdb_assert (the_low_target.set_pc != NULL);
4911
4912 (*the_low_target.set_pc) (regcache, pc);
4913 }
4914
4915 static int
4916 linux_thread_stopped (struct thread_info *thread)
4917 {
4918 return get_thread_lwp (thread)->stopped;
4919 }
4920
4921 /* This exposes stop-all-threads functionality to other modules. */
4922
4923 static void
4924 linux_pause_all (int freeze)
4925 {
4926 stop_all_lwps (freeze, NULL);
4927 }
4928
4929 /* This exposes unstop-all-threads functionality to other gdbserver
4930 modules. */
4931
4932 static void
4933 linux_unpause_all (int unfreeze)
4934 {
4935 unstop_all_lwps (unfreeze, NULL);
4936 }
4937
4938 static int
4939 linux_prepare_to_access_memory (void)
4940 {
4941 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4942 running LWP. */
4943 if (non_stop)
4944 linux_pause_all (1);
4945 return 0;
4946 }
4947
4948 static void
4949 linux_done_accessing_memory (void)
4950 {
4951 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4952 running LWP. */
4953 if (non_stop)
4954 linux_unpause_all (1);
4955 }
4956
4957 static int
4958 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
4959 CORE_ADDR collector,
4960 CORE_ADDR lockaddr,
4961 ULONGEST orig_size,
4962 CORE_ADDR *jump_entry,
4963 CORE_ADDR *trampoline,
4964 ULONGEST *trampoline_size,
4965 unsigned char *jjump_pad_insn,
4966 ULONGEST *jjump_pad_insn_size,
4967 CORE_ADDR *adjusted_insn_addr,
4968 CORE_ADDR *adjusted_insn_addr_end,
4969 char *err)
4970 {
4971 return (*the_low_target.install_fast_tracepoint_jump_pad)
4972 (tpoint, tpaddr, collector, lockaddr, orig_size,
4973 jump_entry, trampoline, trampoline_size,
4974 jjump_pad_insn, jjump_pad_insn_size,
4975 adjusted_insn_addr, adjusted_insn_addr_end,
4976 err);
4977 }
4978
4979 static struct emit_ops *
4980 linux_emit_ops (void)
4981 {
4982 if (the_low_target.emit_ops != NULL)
4983 return (*the_low_target.emit_ops) ();
4984 else
4985 return NULL;
4986 }
4987
4988 static int
4989 linux_get_min_fast_tracepoint_insn_len (void)
4990 {
4991 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
4992 }
4993
4994 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
4995
4996 static int
4997 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
4998 CORE_ADDR *phdr_memaddr, int *num_phdr)
4999 {
5000 char filename[PATH_MAX];
5001 int fd;
5002 const int auxv_size = is_elf64
5003 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5004 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5005
5006 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5007
5008 fd = open (filename, O_RDONLY);
5009 if (fd < 0)
5010 return 1;
5011
5012 *phdr_memaddr = 0;
5013 *num_phdr = 0;
5014 while (read (fd, buf, auxv_size) == auxv_size
5015 && (*phdr_memaddr == 0 || *num_phdr == 0))
5016 {
5017 if (is_elf64)
5018 {
5019 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5020
5021 switch (aux->a_type)
5022 {
5023 case AT_PHDR:
5024 *phdr_memaddr = aux->a_un.a_val;
5025 break;
5026 case AT_PHNUM:
5027 *num_phdr = aux->a_un.a_val;
5028 break;
5029 }
5030 }
5031 else
5032 {
5033 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5034
5035 switch (aux->a_type)
5036 {
5037 case AT_PHDR:
5038 *phdr_memaddr = aux->a_un.a_val;
5039 break;
5040 case AT_PHNUM:
5041 *num_phdr = aux->a_un.a_val;
5042 break;
5043 }
5044 }
5045 }
5046
5047 close (fd);
5048
5049 if (*phdr_memaddr == 0 || *num_phdr == 0)
5050 {
5051 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5052 "phdr_memaddr = %ld, phdr_num = %d",
5053 (long) *phdr_memaddr, *num_phdr);
5054 return 2;
5055 }
5056
5057 return 0;
5058 }
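/* For the current process the same two auxv values are available
   without touching /proc at all; a sketch, assuming glibc 2.16 or
   later for getauxval:  */
#if 0
#include <sys/auxv.h>

static int
own_phdr_phnum (unsigned long *phdr, unsigned long *num_phdr)
{
  *phdr = getauxval (AT_PHDR);
  *num_phdr = getauxval (AT_PHNUM);
  return (*phdr == 0 || *num_phdr == 0);
}
#endif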
5059
5060 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5061
5062 static CORE_ADDR
5063 get_dynamic (const int pid, const int is_elf64)
5064 {
5065 CORE_ADDR phdr_memaddr, relocation;
5066 int num_phdr, i;
5067 unsigned char *phdr_buf;
5068 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5069
5070 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5071 return 0;
5072
5073 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5074 phdr_buf = alloca (num_phdr * phdr_size);
5075
5076 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5077 return 0;
5078
5079 /* Compute relocation: it is expected to be 0 for "regular" executables,
5080 non-zero for PIE ones. */
5081 relocation = -1;
5082 for (i = 0; relocation == -1 && i < num_phdr; i++)
5083 if (is_elf64)
5084 {
5085 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5086
5087 if (p->p_type == PT_PHDR)
5088 relocation = phdr_memaddr - p->p_vaddr;
5089 }
5090 else
5091 {
5092 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5093
5094 if (p->p_type == PT_PHDR)
5095 relocation = phdr_memaddr - p->p_vaddr;
5096 }
5097
5098 if (relocation == -1)
5099 {
5100 warning ("Unexpected missing PT_PHDR");
5101 return 0;
5102 }
5103
5104 for (i = 0; i < num_phdr; i++)
5105 {
5106 if (is_elf64)
5107 {
5108 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5109
5110 if (p->p_type == PT_DYNAMIC)
5111 return p->p_vaddr + relocation;
5112 }
5113 else
5114 {
5115 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5116
5117 if (p->p_type == PT_DYNAMIC)
5118 return p->p_vaddr + relocation;
5119 }
5120 }
5121
5122 return 0;
5123 }
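/* Worked example of the relocation above for a PIE executable: if
   AT_PHDR reports 0x555555554040 while the PT_PHDR entry records
   p_vaddr = 0x40, then relocation = 0x555555554040 - 0x40
   = 0x555555554000, and PT_DYNAMIC's p_vaddr is adjusted by that
   amount.  For a non-PIE executable AT_PHDR equals PT_PHDR's
   p_vaddr, so the relocation is 0.  */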
5124
5125 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5126 can be 0 if the inferior does not yet have the library list initialized. */
5127
5128 static CORE_ADDR
5129 get_r_debug (const int pid, const int is_elf64)
5130 {
5131 CORE_ADDR dynamic_memaddr;
5132 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5133 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5134
5135 dynamic_memaddr = get_dynamic (pid, is_elf64);
5136 if (dynamic_memaddr == 0)
5137 return (CORE_ADDR) -1;
5138
5139 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5140 {
5141 if (is_elf64)
5142 {
5143 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5144
5145 if (dyn->d_tag == DT_DEBUG)
5146 return dyn->d_un.d_val;
5147
5148 if (dyn->d_tag == DT_NULL)
5149 break;
5150 }
5151 else
5152 {
5153 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5154
5155 if (dyn->d_tag == DT_DEBUG)
5156 return dyn->d_un.d_val;
5157
5158 if (dyn->d_tag == DT_NULL)
5159 break;
5160 }
5161
5162 dynamic_memaddr += dyn_size;
5163 }
5164
5165 return (CORE_ADDR) -1;
5166 }
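/* In-process counterpart of the walk above, for reference: glibc
   exports the structure directly as _r_debug (<link.h>), so a
   program can enumerate its own link map without remote reads.  A
   sketch, assuming glibc:  */
#if 0
#include <link.h>
#include <stdio.h>

static void
dump_own_link_map (void)
{
  struct link_map *lm;

  for (lm = _r_debug.r_map; lm != NULL; lm = lm->l_next)
    printf ("l_addr=0x%lx l_ld=%p name=%s\n",
	    (unsigned long) lm->l_addr, (void *) lm->l_ld,
	    lm->l_name && lm->l_name[0] != '\0' ? lm->l_name : "(main)");
}
#endif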
5167
5168 /* Read one pointer from MEMADDR in the inferior. */
5169
5170 static int
5171 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5172 {
5173 *ptr = 0;
5174 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5175 }
5176
5177 struct link_map_offsets
5178 {
5179 /* Offset and size of r_debug.r_version. */
5180 int r_version_offset;
5181
5182 /* Offset and size of r_debug.r_map. */
5183 int r_map_offset;
5184
5185 /* Offset to l_addr field in struct link_map. */
5186 int l_addr_offset;
5187
5188 /* Offset to l_name field in struct link_map. */
5189 int l_name_offset;
5190
5191 /* Offset to l_ld field in struct link_map. */
5192 int l_ld_offset;
5193
5194 /* Offset to l_next field in struct link_map. */
5195 int l_next_offset;
5196
5197 /* Offset to l_prev field in struct link_map. */
5198 int l_prev_offset;
5199 };
5200
5201 /* Construct qXfer:libraries:read reply. */
5202
5203 static int
5204 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5205 unsigned const char *writebuf,
5206 CORE_ADDR offset, int len)
5207 {
5208 char *document;
5209 unsigned document_len;
5210 struct process_info_private *const priv = current_process ()->private;
5211 char filename[PATH_MAX];
5212 int pid, is_elf64;
5213
5214 static const struct link_map_offsets lmo_32bit_offsets =
5215 {
5216 0, /* r_version offset. */
5217 4, /* r_debug.r_map offset. */
5218 0, /* l_addr offset in link_map. */
5219 4, /* l_name offset in link_map. */
5220 8, /* l_ld offset in link_map. */
5221 12, /* l_next offset in link_map. */
5222 16 /* l_prev offset in link_map. */
5223 };
5224
5225 static const struct link_map_offsets lmo_64bit_offsets =
5226 {
5227 0, /* r_version offset. */
5228 8, /* r_debug.r_map offset. */
5229 0, /* l_addr offset in link_map. */
5230 8, /* l_name offset in link_map. */
5231 16, /* l_ld offset in link_map. */
5232 24, /* l_next offset in link_map. */
5233 32 /* l_prev offset in link_map. */
5234 };
5235 const struct link_map_offsets *lmo;
5236
5237 if (writebuf != NULL)
5238 return -2;
5239 if (readbuf == NULL)
5240 return -1;
5241
5242 pid = lwpid_of (get_thread_lwp (current_inferior));
5243 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5244 is_elf64 = elf_64_file_p (filename);
5245 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5246
5247 if (priv->r_debug == 0)
5248 priv->r_debug = get_r_debug (pid, is_elf64);
5249
5250 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5251 {
5252 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5253 }
5254 else
5255 {
5256 int allocated = 1024;
5257 char *p;
5258 const int ptr_size = is_elf64 ? 8 : 4;
5259 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5260 int r_version, header_done = 0;
5261
5262 document = xmalloc (allocated);
5263 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5264 p = document + strlen (document);
5265
5266 r_version = 0;
5267 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5268 (unsigned char *) &r_version,
5269 sizeof (r_version)) != 0
5270 || r_version != 1)
5271 {
5272 warning ("unexpected r_debug version %d", r_version);
5273 goto done;
5274 }
5275
5276 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5277 &lm_addr, ptr_size) != 0)
5278 {
5279 warning ("unable to read r_map from 0x%lx",
5280 (long) priv->r_debug + lmo->r_map_offset);
5281 goto done;
5282 }
5283
5284 lm_prev = 0;
5285 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5286 &l_name, ptr_size) == 0
5287 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5288 &l_addr, ptr_size) == 0
5289 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5290 &l_ld, ptr_size) == 0
5291 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5292 &l_prev, ptr_size) == 0
5293 && read_one_ptr (lm_addr + lmo->l_next_offset,
5294 &l_next, ptr_size) == 0)
5295 {
5296 unsigned char libname[PATH_MAX];
5297
5298 if (lm_prev != l_prev)
5299 {
5300 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5301 (long) lm_prev, (long) l_prev);
5302 break;
5303 }
5304
5305 /* Not checking for error because reading may stop before
5306 we've got PATH_MAX worth of characters. */
5307 libname[0] = '\0';
5308 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5309 libname[sizeof (libname) - 1] = '\0';
5310 if (libname[0] != '\0')
5311 {
5312 /* 6x the size for xml_escape_text below. */
5313 size_t len = 6 * strlen ((char *) libname);
5314 char *name;
5315
5316 if (!header_done)
5317 {
5318 /* Terminate `<library-list-svr4'. */
5319 *p++ = '>';
5320 header_done = 1;
5321 }
5322
5323 while (allocated < p - document + len + 200)
5324 {
5325 /* Expand to guarantee sufficient storage. */
5326 uintptr_t document_len = p - document;
5327
5328 document = xrealloc (document, 2 * allocated);
5329 allocated *= 2;
5330 p = document + document_len;
5331 }
5332
5333 name = xml_escape_text ((char *) libname);
5334 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5335 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5336 name, (unsigned long) lm_addr,
5337 (unsigned long) l_addr, (unsigned long) l_ld);
5338 free (name);
5339 }
5340 else if (lm_prev == 0)
5341 {
5342 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5343 p = p + strlen (p);
5344 }
5345
5346 if (l_next == 0)
5347 break;
5348
5349 lm_prev = lm_addr;
5350 lm_addr = l_next;
5351 }
5352 done:
5353 strcpy (p, "</library-list-svr4>");
5354 }
5355
5356 document_len = strlen (document);
5357 if (offset < document_len)
5358 document_len -= offset;
5359 else
5360 document_len = 0;
5361 if (len > document_len)
5362 len = document_len;
5363
5364 memcpy (readbuf, document + offset, len);
5365 xfree (document);
5366
5367 return len;
5368 }
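/* For reference, a reply built by the code above looks like this
   (addresses illustrative, whitespace added for readability):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe150">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe700"
                l_addr="0x7ffff7a1c000" l_ld="0x7ffff7dbb9e8"/>
     </library-list-svr4>

   The short form <library-list-svr4 version="1.0"/> is sent when the
   inferior's library list is not (or not yet) available.  */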
5369
5370 static struct target_ops linux_target_ops = {
5371 linux_create_inferior,
5372 linux_attach,
5373 linux_kill,
5374 linux_detach,
5375 linux_mourn,
5376 linux_join,
5377 linux_thread_alive,
5378 linux_resume,
5379 linux_wait,
5380 linux_fetch_registers,
5381 linux_store_registers,
5382 linux_prepare_to_access_memory,
5383 linux_done_accessing_memory,
5384 linux_read_memory,
5385 linux_write_memory,
5386 linux_look_up_symbols,
5387 linux_request_interrupt,
5388 linux_read_auxv,
5389 linux_insert_point,
5390 linux_remove_point,
5391 linux_stopped_by_watchpoint,
5392 linux_stopped_data_address,
5393 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5394 linux_read_offsets,
5395 #else
5396 NULL,
5397 #endif
5398 #ifdef USE_THREAD_DB
5399 thread_db_get_tls_address,
5400 #else
5401 NULL,
5402 #endif
5403 linux_qxfer_spu,
5404 hostio_last_error_from_errno,
5405 linux_qxfer_osdata,
5406 linux_xfer_siginfo,
5407 linux_supports_non_stop,
5408 linux_async,
5409 linux_start_non_stop,
5410 linux_supports_multi_process,
5411 #ifdef USE_THREAD_DB
5412 thread_db_handle_monitor_command,
5413 #else
5414 NULL,
5415 #endif
5416 linux_common_core_of_thread,
5417 linux_read_loadmap,
5418 linux_process_qsupported,
5419 linux_supports_tracepoints,
5420 linux_read_pc,
5421 linux_write_pc,
5422 linux_thread_stopped,
5423 NULL,
5424 linux_pause_all,
5425 linux_unpause_all,
5426 linux_cancel_breakpoints,
5427 linux_stabilize_threads,
5428 linux_install_fast_tracepoint_jump_pad,
5429 linux_emit_ops,
5430 linux_supports_disable_randomization,
5431 linux_get_min_fast_tracepoint_insn_len,
5432 linux_qxfer_libraries_svr4,
5433 };
5434
5435 static void
5436 linux_init_signals (void)
5437 {
5438 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5439 to find what the cancel signal actually is. */
5440 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5441 signal (__SIGRTMIN+1, SIG_IGN);
5442 #endif
5443 }
5444
5445 void
5446 initialize_low (void)
5447 {
5448 struct sigaction sigchld_action;
5449 memset (&sigchld_action, 0, sizeof (sigchld_action));
5450 set_target_ops (&linux_target_ops);
5451 set_breakpoint_data (the_low_target.breakpoint,
5452 the_low_target.breakpoint_len);
5453 linux_init_signals ();
5454 linux_test_for_tracefork ();
5455 #ifdef HAVE_LINUX_REGSETS
5456 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5457 ;
5458 disabled_regsets = xmalloc (num_regsets);
5459 #endif
5460
5461 sigchld_action.sa_handler = sigchld_handler;
5462 sigemptyset (&sigchld_action.sa_mask);
5463 sigchld_action.sa_flags = SA_RESTART;
5464 sigaction (SIGCHLD, &sigchld_action, NULL);
5465 }