gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include "linux-ptrace.h"
28 #include "linux-procfs.h"
29 #include <signal.h>
30 #include <sys/ioctl.h>
31 #include <fcntl.h>
32 #include <string.h>
33 #include <stdlib.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <sys/syscall.h>
37 #include <sched.h>
38 #include <ctype.h>
39 #include <pwd.h>
40 #include <sys/types.h>
41 #include <dirent.h>
42 #include <sys/stat.h>
43 #include <sys/vfs.h>
44 #include <sys/uio.h>
45 #ifndef ELFMAG0
46 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
47 then ELFMAG0 will have been defined. If it didn't get included by
48 gdb_proc_service.h then including it will likely introduce a duplicate
49 definition of elf_fpregset_t. */
50 #include <elf.h>
51 #endif
52
53 #ifndef SPUFS_MAGIC
54 #define SPUFS_MAGIC 0x23c9b64e
55 #endif
56
57 #ifdef HAVE_PERSONALITY
58 # include <sys/personality.h>
59 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
60 # define ADDR_NO_RANDOMIZE 0x0040000
61 # endif
62 #endif
63
64 #ifndef O_LARGEFILE
65 #define O_LARGEFILE 0
66 #endif
67
68 #ifndef W_STOPCODE
69 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
70 #endif
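/* Quick illustration (not from the original file) of what the
   W_STOPCODE fallback above builds: a synthetic wait status that the
   standard macros decode back to the stopping signal.  Kept under
   #if 0; assumes <assert.h>.  */
#if 0
int status = W_STOPCODE (SIGSTOP);
assert (WIFSTOPPED (status));           /* low byte is 0x7f */
assert (WSTOPSIG (status) == SIGSTOP);  /* signal sits in bits 8-15 */
#endif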
71
72 /* This is the kernel's hard limit. Not to be confused with
73 SIGRTMIN. */
74 #ifndef __SIGRTMIN
75 #define __SIGRTMIN 32
76 #endif
77
78 #ifdef __UCLIBC__
79 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
80 #define HAS_NOMMU
81 #endif
82 #endif
83
84 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
85 representation of the thread ID.
86
87 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
88 the same as the LWP ID.
89
90 ``all_processes'' is keyed by the "overall process ID", which
91 GNU/Linux calls tgid, "thread group ID". */
92
93 struct inferior_list all_lwps;
94
95 /* A list of all unknown processes which receive stop signals. Some other
96 process will presumably claim each of these as forked children
97 momentarily. */
98
99 struct inferior_list stopped_pids;
100
101 /* FIXME this is a bit of a hack, and could be removed. */
102 int stopping_threads;
103
104 /* FIXME make into a target method? */
105 int using_threads = 1;
106
107 /* True if we're presently stabilizing threads (moving them out of
108 jump pads). */
109 static int stabilizing_threads;
110
111 /* This flag is true iff we've just created or attached to our first
112 inferior but it has not stopped yet. As soon as it does, we need
113 to call the low target's arch_setup callback. Doing this only on
 114 the first inferior avoids reinitializing the architecture on every
115 inferior, and avoids messing with the register caches of the
116 already running inferiors. NOTE: this assumes all inferiors under
117 control of gdbserver have the same architecture. */
118 static int new_inferior;
119
120 static void linux_resume_one_lwp (struct lwp_info *lwp,
121 int step, int signal, siginfo_t *info);
122 static void linux_resume (struct thread_resume *resume_info, size_t n);
123 static void stop_all_lwps (int suspend, struct lwp_info *except);
124 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
125 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
126 static void *add_lwp (ptid_t ptid);
127 static int linux_stopped_by_watchpoint (void);
128 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
129 static void proceed_all_lwps (void);
130 static int finish_step_over (struct lwp_info *lwp);
131 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
132 static int kill_lwp (unsigned long lwpid, int signo);
133 static void linux_enable_event_reporting (int pid);
134
135 /* True if the low target can hardware single-step. Such targets
136 don't need a BREAKPOINT_REINSERT_ADDR callback. */
137
138 static int
139 can_hardware_single_step (void)
140 {
141 return (the_low_target.breakpoint_reinsert_addr == NULL);
142 }
143
144 /* True if the low target supports memory breakpoints. If so, we'll
145 have a GET_PC implementation. */
146
147 static int
148 supports_breakpoints (void)
149 {
150 return (the_low_target.get_pc != NULL);
151 }
152
153 /* Returns true if this target can support fast tracepoints. This
154 does not mean that the in-process agent has been loaded in the
155 inferior. */
156
157 static int
158 supports_fast_tracepoints (void)
159 {
160 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
161 }
162
163 struct pending_signals
164 {
165 int signal;
166 siginfo_t info;
167 struct pending_signals *prev;
168 };
169
170 #define PTRACE_ARG3_TYPE void *
171 #define PTRACE_ARG4_TYPE void *
172 #define PTRACE_XFER_TYPE long
173
174 #ifdef HAVE_LINUX_REGSETS
175 static char *disabled_regsets;
176 static int num_regsets;
177 #endif
178
179 /* The read/write ends of the pipe registered as waitable file in the
180 event loop. */
181 static int linux_event_pipe[2] = { -1, -1 };
182
183 /* True if we're currently in async mode. */
184 #define target_is_async_p() (linux_event_pipe[0] != -1)
185
186 static void send_sigstop (struct lwp_info *lwp);
187 static void wait_for_sigstop (struct inferior_list_entry *entry);
188
189 /* Return non-zero if HEADER is a 64-bit ELF file. */
190
191 static int
192 elf_64_header_p (const Elf64_Ehdr *header)
193 {
194 return (header->e_ident[EI_MAG0] == ELFMAG0
195 && header->e_ident[EI_MAG1] == ELFMAG1
196 && header->e_ident[EI_MAG2] == ELFMAG2
197 && header->e_ident[EI_MAG3] == ELFMAG3
198 && header->e_ident[EI_CLASS] == ELFCLASS64);
199 }
200
201 /* Return non-zero if FILE is a 64-bit ELF file,
202 zero if the file is not a 64-bit ELF file,
203 and -1 if the file is not accessible or doesn't exist. */
204
205 static int
206 elf_64_file_p (const char *file)
207 {
208 Elf64_Ehdr header;
209 int fd;
210
211 fd = open (file, O_RDONLY);
212 if (fd < 0)
213 return -1;
214
215 if (read (fd, &header, sizeof (header)) != sizeof (header))
216 {
217 close (fd);
218 return 0;
219 }
220 close (fd);
221
222 return elf_64_header_p (&header);
223 }
224
 225 /* Accept an integer PID; return true if the executable that PID
 226 is running is a 64-bit ELF file. */
227
228 int
229 linux_pid_exe_is_elf_64_file (int pid)
230 {
231 char file[MAXPATHLEN];
232
233 sprintf (file, "/proc/%d/exe", pid);
234 return elf_64_file_p (file);
235 }
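/* Usage sketch (assumed caller, not part of this file): choose a
   register-set layout from the ELF class of a process's image.
   USE_64BIT_LAYOUT is a hypothetical flag.  */
#if 0
switch (linux_pid_exe_is_elf_64_file (pid))
  {
  case 1:   /* 64-bit ELF executable.  */
    use_64bit_layout = 1;
    break;
  case 0:   /* 32-bit, or at least not a 64-bit ELF.  */
    use_64bit_layout = 0;
    break;
  default:  /* -1: /proc/PID/exe was not accessible.  */
    break;
  }
#endif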
236
237 static void
238 delete_lwp (struct lwp_info *lwp)
239 {
240 remove_thread (get_lwp_thread (lwp));
241 remove_inferior (&all_lwps, &lwp->head);
242 free (lwp->arch_private);
243 free (lwp);
244 }
245
246 /* Add a process to the common process list, and set its private
247 data. */
248
249 static struct process_info *
250 linux_add_process (int pid, int attached)
251 {
252 struct process_info *proc;
253
254 /* Is this the first process? If so, then set the arch. */
255 if (all_processes.head == NULL)
256 new_inferior = 1;
257
258 proc = add_process (pid, attached);
259 proc->private = xcalloc (1, sizeof (*proc->private));
260
261 if (the_low_target.new_process != NULL)
262 proc->private->arch_private = the_low_target.new_process ();
263
264 return proc;
265 }
266
267 /* Wrapper function for waitpid which handles EINTR, and emulates
268 __WALL for systems where that is not available. */
269
270 static int
271 my_waitpid (int pid, int *status, int flags)
272 {
273 int ret, out_errno;
274
275 if (debug_threads)
276 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
277
278 if (flags & __WALL)
279 {
280 sigset_t block_mask, org_mask, wake_mask;
281 int wnohang;
282
283 wnohang = (flags & WNOHANG) != 0;
284 flags &= ~(__WALL | __WCLONE);
285 flags |= WNOHANG;
286
287 /* Block all signals while here. This avoids knowing about
 288 LinuxThreads' signals. */
289 sigfillset (&block_mask);
290 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
291
292 /* ... except during the sigsuspend below. */
293 sigemptyset (&wake_mask);
294
295 while (1)
296 {
297 /* Since all signals are blocked, there's no need to check
298 for EINTR here. */
299 ret = waitpid (pid, status, flags);
300 out_errno = errno;
301
302 if (ret == -1 && out_errno != ECHILD)
303 break;
304 else if (ret > 0)
305 break;
306
307 if (flags & __WCLONE)
308 {
309 /* We've tried both flavors now. If WNOHANG is set,
310 there's nothing else to do, just bail out. */
311 if (wnohang)
312 break;
313
314 if (debug_threads)
315 fprintf (stderr, "blocking\n");
316
317 /* Block waiting for signals. */
318 sigsuspend (&wake_mask);
319 }
320
321 flags ^= __WCLONE;
322 }
323
324 sigprocmask (SIG_SETMASK, &org_mask, NULL);
325 }
326 else
327 {
328 do
329 ret = waitpid (pid, status, flags);
330 while (ret == -1 && errno == EINTR);
331 out_errno = errno;
332 }
333
334 if (debug_threads)
335 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
336 pid, flags, status ? *status : -1, ret);
337
338 errno = out_errno;
339 return ret;
340 }
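/* Call sketch (assumed, not in the original): wait for any child,
   clone "threads" included, whether or not the kernel understands
   __WALL natively -- my_waitpid falls back to alternating WNOHANG
   waits with and without __WCLONE, sleeping in sigsuspend between
   rounds.  */
#if 0
int status;
int pid = my_waitpid (-1, &status, __WALL);
if (pid > 0 && WIFSTOPPED (status))
  fprintf (stderr, "LWP %d stopped with signal %d\n",
           pid, WSTOPSIG (status));
#endif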
341
342 /* Handle a GNU/Linux extended wait response. If we see a clone
343 event, we need to add the new LWP to our list (and not report the
344 trap to higher layers). */
345
346 static void
347 handle_extended_wait (struct lwp_info *event_child, int wstat)
348 {
349 int event = wstat >> 16;
350 struct lwp_info *new_lwp;
351
352 if (event == PTRACE_EVENT_CLONE)
353 {
354 ptid_t ptid;
355 unsigned long new_pid;
356 int ret, status = W_STOPCODE (SIGSTOP);
357
358 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
359
360 /* If we haven't already seen the new PID stop, wait for it now. */
361 if (! pull_pid_from_list (&stopped_pids, new_pid))
362 {
363 /* The new child has a pending SIGSTOP. We can't affect it until it
364 hits the SIGSTOP, but we're already attached. */
365
366 ret = my_waitpid (new_pid, &status, __WALL);
367
368 if (ret == -1)
369 perror_with_name ("waiting for new child");
370 else if (ret != new_pid)
371 warning ("wait returned unexpected PID %d", ret);
372 else if (!WIFSTOPPED (status))
373 warning ("wait returned unexpected status 0x%x", status);
374 }
375
376 linux_enable_event_reporting (new_pid);
377
378 ptid = ptid_build (pid_of (event_child), new_pid, 0);
379 new_lwp = (struct lwp_info *) add_lwp (ptid);
380 add_thread (ptid, new_lwp);
381
382 /* Either we're going to immediately resume the new thread
383 or leave it stopped. linux_resume_one_lwp is a nop if it
384 thinks the thread is currently running, so set this first
385 before calling linux_resume_one_lwp. */
386 new_lwp->stopped = 1;
387
388 /* Normally we will get the pending SIGSTOP. But in some cases
389 we might get another signal delivered to the group first.
390 If we do get another signal, be sure not to lose it. */
391 if (WSTOPSIG (status) == SIGSTOP)
392 {
393 if (stopping_threads)
394 new_lwp->stop_pc = get_stop_pc (new_lwp);
395 else
396 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
397 }
398 else
399 {
400 new_lwp->stop_expected = 1;
401
402 if (stopping_threads)
403 {
404 new_lwp->stop_pc = get_stop_pc (new_lwp);
405 new_lwp->status_pending_p = 1;
406 new_lwp->status_pending = status;
407 }
408 else
409 /* Pass the signal on. This is what GDB does - except
410 shouldn't we really report it instead? */
411 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
412 }
413
414 /* Always resume the current thread. If we are stopping
415 threads, it will have a pending SIGSTOP; we may as well
416 collect it now. */
417 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
418 }
419 }
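/* For context, a sketch (assumed; the real call lives in
   linux_enable_event_reporting, declared above) of the ptrace option
   that produces the extended status handle_extended_wait decodes:
   with PTRACE_O_TRACECLONE set, a clone stops the tracer with
   PTRACE_EVENT_CLONE in bits 16 and up of the wait status, and
   PTRACE_GETEVENTMSG yields the new LWP's id.  */
#if 0
ptrace (PTRACE_SETOPTIONS, pid, 0, (void *) PTRACE_O_TRACECLONE);
#endif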
420
421 /* Return the PC as read from the regcache of LWP, without any
422 adjustment. */
423
424 static CORE_ADDR
425 get_pc (struct lwp_info *lwp)
426 {
427 struct thread_info *saved_inferior;
428 struct regcache *regcache;
429 CORE_ADDR pc;
430
431 if (the_low_target.get_pc == NULL)
432 return 0;
433
434 saved_inferior = current_inferior;
435 current_inferior = get_lwp_thread (lwp);
436
437 regcache = get_thread_regcache (current_inferior, 1);
438 pc = (*the_low_target.get_pc) (regcache);
439
440 if (debug_threads)
441 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
442
443 current_inferior = saved_inferior;
444 return pc;
445 }
446
447 /* This function should only be called if LWP got a SIGTRAP.
448 The SIGTRAP could mean several things.
449
450 On i386, where decr_pc_after_break is non-zero:
451 If we were single-stepping this process using PTRACE_SINGLESTEP,
452 we will get only the one SIGTRAP (even if the instruction we
453 stepped over was a breakpoint). The value of $eip will be the
454 next instruction.
455 If we continue the process using PTRACE_CONT, we will get a
456 SIGTRAP when we hit a breakpoint. The value of $eip will be
457 the instruction after the breakpoint (i.e. needs to be
458 decremented). If we report the SIGTRAP to GDB, we must also
459 report the undecremented PC. If we cancel the SIGTRAP, we
460 must resume at the decremented PC.
461
462 (Presumably, not yet tested) On a non-decr_pc_after_break machine
463 with hardware or kernel single-step:
464 If we single-step over a breakpoint instruction, our PC will
465 point at the following instruction. If we continue and hit a
466 breakpoint instruction, our PC will point at the breakpoint
467 instruction. */
468
469 static CORE_ADDR
470 get_stop_pc (struct lwp_info *lwp)
471 {
472 CORE_ADDR stop_pc;
473
474 if (the_low_target.get_pc == NULL)
475 return 0;
476
477 stop_pc = get_pc (lwp);
478
479 if (WSTOPSIG (lwp->last_status) == SIGTRAP
480 && !lwp->stepping
481 && !lwp->stopped_by_watchpoint
482 && lwp->last_status >> 16 == 0)
483 stop_pc -= the_low_target.decr_pc_after_break;
484
485 if (debug_threads)
486 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
487
488 return stop_pc;
489 }
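/* Worked example (i386 assumed): the breakpoint instruction is the
   one-byte int3, so decr_pc_after_break is 1.  A breakpoint planted
   at 0x080484b0 and hit under PTRACE_CONT reports $eip as 0x080484b1;
   get_stop_pc subtracts the adjustment so GDB sees the breakpoint
   address itself.  */
#if 0
CORE_ADDR bkpt_addr = 0x080484b0;  /* where the breakpoint was set */
CORE_ADDR reported  = 0x080484b1;  /* $eip at the SIGTRAP */
assert (reported - the_low_target.decr_pc_after_break == bkpt_addr);
#endif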
490
491 static void *
492 add_lwp (ptid_t ptid)
493 {
494 struct lwp_info *lwp;
495
496 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
497 memset (lwp, 0, sizeof (*lwp));
498
499 lwp->head.id = ptid;
500
501 if (the_low_target.new_thread != NULL)
502 lwp->arch_private = the_low_target.new_thread ();
503
504 add_inferior_to_list (&all_lwps, &lwp->head);
505
506 return lwp;
507 }
508
 509 /* Start an inferior process and return its pid.
510 ALLARGS is a vector of program-name and args. */
511
512 static int
513 linux_create_inferior (char *program, char **allargs)
514 {
515 #ifdef HAVE_PERSONALITY
516 int personality_orig = 0, personality_set = 0;
517 #endif
518 struct lwp_info *new_lwp;
519 int pid;
520 ptid_t ptid;
521
522 #ifdef HAVE_PERSONALITY
523 if (disable_randomization)
524 {
525 errno = 0;
526 personality_orig = personality (0xffffffff);
527 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
528 {
529 personality_set = 1;
530 personality (personality_orig | ADDR_NO_RANDOMIZE);
531 }
532 if (errno != 0 || (personality_set
533 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
534 warning ("Error disabling address space randomization: %s",
535 strerror (errno));
536 }
537 #endif
538
539 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
540 pid = vfork ();
541 #else
542 pid = fork ();
543 #endif
544 if (pid < 0)
545 perror_with_name ("fork");
546
547 if (pid == 0)
548 {
549 ptrace (PTRACE_TRACEME, 0, 0, 0);
550
551 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
552 signal (__SIGRTMIN + 1, SIG_DFL);
553 #endif
554
555 setpgid (0, 0);
556
557 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
558 stdout to stderr so that inferior i/o doesn't corrupt the connection.
559 Also, redirect stdin to /dev/null. */
560 if (remote_connection_is_stdio ())
561 {
562 close (0);
563 open ("/dev/null", O_RDONLY);
564 dup2 (2, 1);
565 if (write (2, "stdin/stdout redirected\n",
566 sizeof ("stdin/stdout redirected\n") - 1) < 0)
567 /* Errors ignored. */;
568 }
569
570 execv (program, allargs);
571 if (errno == ENOENT)
572 execvp (program, allargs);
573
574 fprintf (stderr, "Cannot exec %s: %s.\n", program,
575 strerror (errno));
576 fflush (stderr);
577 _exit (0177);
578 }
579
580 #ifdef HAVE_PERSONALITY
581 if (personality_set)
582 {
583 errno = 0;
584 personality (personality_orig);
585 if (errno != 0)
586 warning ("Error restoring address space randomization: %s",
587 strerror (errno));
588 }
589 #endif
590
591 linux_add_process (pid, 0);
592
593 ptid = ptid_build (pid, pid, 0);
594 new_lwp = add_lwp (ptid);
595 add_thread (ptid, new_lwp);
596 new_lwp->must_set_ptrace_flags = 1;
597
598 return pid;
599 }
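/* Condensed sketch of the disable-randomization dance above (assumes
   HAVE_PERSONALITY): personality (0xffffffff) queries the current
   persona without changing it; OR-ing in ADDR_NO_RANDOMIZE turns
   ASLR off for the child about to be exec'd, and the parent restores
   its own persona afterwards.  */
#if 0
int orig = personality (0xffffffff);     /* query only */
personality (orig | ADDR_NO_RANDOMIZE);  /* inherited across fork/exec */
/* ... fork (), ptrace (PTRACE_TRACEME), execv () ... */
personality (orig);                      /* gdbserver itself unchanged */
#endif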
600
601 /* Attach to an inferior process. */
602
603 static void
604 linux_attach_lwp_1 (unsigned long lwpid, int initial)
605 {
606 ptid_t ptid;
607 struct lwp_info *new_lwp;
608
609 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
610 {
611 if (!initial)
612 {
613 /* If we fail to attach to an LWP, just warn. */
614 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
615 strerror (errno), errno);
616 fflush (stderr);
617 return;
618 }
619 else
620 /* If we fail to attach to a process, report an error. */
621 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
622 strerror (errno), errno);
623 }
624
625 if (initial)
626 /* If lwp is the tgid, we handle adding existing threads later.
627 Otherwise we just add lwp without bothering about any other
628 threads. */
629 ptid = ptid_build (lwpid, lwpid, 0);
630 else
631 {
632 /* Note that extracting the pid from the current inferior is
633 safe, since we're always called in the context of the same
634 process as this new thread. */
635 int pid = pid_of (get_thread_lwp (current_inferior));
636 ptid = ptid_build (pid, lwpid, 0);
637 }
638
639 new_lwp = (struct lwp_info *) add_lwp (ptid);
640 add_thread (ptid, new_lwp);
641
642 /* We need to wait for SIGSTOP before being able to make the next
643 ptrace call on this LWP. */
644 new_lwp->must_set_ptrace_flags = 1;
645
646 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
647 brings it to a halt.
648
649 There are several cases to consider here:
650
651 1) gdbserver has already attached to the process and is being notified
652 of a new thread that is being created.
653 In this case we should ignore that SIGSTOP and resume the
654 process. This is handled below by setting stop_expected = 1,
655 and the fact that add_thread sets last_resume_kind ==
656 resume_continue.
657
658 2) This is the first thread (the process thread), and we're attaching
659 to it via attach_inferior.
660 In this case we want the process thread to stop.
661 This is handled by having linux_attach set last_resume_kind ==
662 resume_stop after we return.
663
664 If the pid we are attaching to is also the tgid, we attach to and
665 stop all the existing threads. Otherwise, we attach to pid and
666 ignore any other threads in the same group as this pid.
667
668 3) GDB is connecting to gdbserver and is requesting an enumeration of all
669 existing threads.
670 In this case we want the thread to stop.
671 FIXME: This case is currently not properly handled.
672 We should wait for the SIGSTOP but don't. Things work apparently
673 because enough time passes between when we ptrace (ATTACH) and when
674 gdb makes the next ptrace call on the thread.
675
676 On the other hand, if we are currently trying to stop all threads, we
677 should treat the new thread as if we had sent it a SIGSTOP. This works
678 because we are guaranteed that the add_lwp call above added us to the
679 end of the list, and so the new thread has not yet reached
680 wait_for_sigstop (but will). */
681 new_lwp->stop_expected = 1;
682 }
683
684 void
685 linux_attach_lwp (unsigned long lwpid)
686 {
687 linux_attach_lwp_1 (lwpid, 0);
688 }
689
690 /* Attach to PID. If PID is the tgid, attach to it and all
691 of its threads. */
692
693 int
694 linux_attach (unsigned long pid)
695 {
696 /* Attach to PID. We will check for other threads
697 soon. */
698 linux_attach_lwp_1 (pid, 1);
699 linux_add_process (pid, 1);
700
701 if (!non_stop)
702 {
703 struct thread_info *thread;
704
705 /* Don't ignore the initial SIGSTOP if we just attached to this
706 process. It will be collected by wait shortly. */
707 thread = find_thread_ptid (ptid_build (pid, pid, 0));
708 thread->last_resume_kind = resume_stop;
709 }
710
711 if (linux_proc_get_tgid (pid) == pid)
712 {
713 DIR *dir;
714 char pathname[128];
715
716 sprintf (pathname, "/proc/%ld/task", pid);
717
718 dir = opendir (pathname);
719
720 if (!dir)
721 {
722 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
723 fflush (stderr);
724 }
725 else
726 {
727 /* At this point we attached to the tgid. Scan the task for
728 existing threads. */
729 unsigned long lwp;
730 int new_threads_found;
731 int iterations = 0;
732 struct dirent *dp;
733
734 while (iterations < 2)
735 {
736 new_threads_found = 0;
737 /* Add all the other threads. While we go through the
738 threads, new threads may be spawned. Cycle through
739 the list of threads until we have done two iterations without
740 finding new threads. */
741 while ((dp = readdir (dir)) != NULL)
742 {
743 /* Fetch one lwp. */
744 lwp = strtoul (dp->d_name, NULL, 10);
745
746 /* Is this a new thread? */
747 if (lwp
748 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
749 {
750 linux_attach_lwp_1 (lwp, 0);
751 new_threads_found++;
752
753 if (debug_threads)
754 fprintf (stderr, "\
755 Found and attached to new lwp %ld\n", lwp);
756 }
757 }
758
759 if (!new_threads_found)
760 iterations++;
761 else
762 iterations = 0;
763
764 rewinddir (dir);
765 }
766 closedir (dir);
767 }
768 }
769
770 return 0;
771 }
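/* Shape of the task scan above, as a standalone sketch (pid 1234
   assumed): every entry of /proc/PID/task is an LWP id; "." and ".."
   parse to 0 under strtoul and are skipped.  linux_attach re-reads
   the directory until two consecutive passes find nothing new, since
   attaching can race with thread creation.  */
#if 0
DIR *dir = opendir ("/proc/1234/task");
struct dirent *dp;
while ((dp = readdir (dir)) != NULL)
  {
    unsigned long lwp = strtoul (dp->d_name, NULL, 10);
    if (lwp != 0)
      fprintf (stderr, "thread %lu\n", lwp);
  }
closedir (dir);
#endif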
772
773 struct counter
774 {
775 int pid;
776 int count;
777 };
778
779 static int
780 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
781 {
782 struct counter *counter = args;
783
784 if (ptid_get_pid (entry->id) == counter->pid)
785 {
786 if (++counter->count > 1)
787 return 1;
788 }
789
790 return 0;
791 }
792
793 static int
794 last_thread_of_process_p (struct thread_info *thread)
795 {
796 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
797 int pid = ptid_get_pid (ptid);
798 struct counter counter = { pid , 0 };
799
800 return (find_inferior (&all_threads,
801 second_thread_of_pid_p, &counter) == NULL);
802 }
803
804 /* Kill LWP. */
805
806 static void
807 linux_kill_one_lwp (struct lwp_info *lwp)
808 {
809 int pid = lwpid_of (lwp);
810
811 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
812 there is no signal context, and ptrace(PTRACE_KILL) (or
813 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
814 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
815 alternative is to kill with SIGKILL. We only need one SIGKILL
816 per process, not one for each thread. But since we still support
817 linuxthreads, and we also support debugging programs using raw
818 clone without CLONE_THREAD, we send one for each thread. For
819 years, we used PTRACE_KILL only, so we're being a bit paranoid
820 about some old kernels where PTRACE_KILL might work better
821 (dubious if there are any such, but that's why it's paranoia), so
822 we try SIGKILL first, PTRACE_KILL second, and so we're fine
823 everywhere. */
824
825 errno = 0;
826 kill (pid, SIGKILL);
827 if (debug_threads)
828 fprintf (stderr,
829 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
830 target_pid_to_str (ptid_of (lwp)),
831 errno ? strerror (errno) : "OK");
832
833 errno = 0;
834 ptrace (PTRACE_KILL, pid, 0, 0);
835 if (debug_threads)
836 fprintf (stderr,
837 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
838 target_pid_to_str (ptid_of (lwp)),
839 errno ? strerror (errno) : "OK");
840 }
841
842 /* Callback for `find_inferior'. Kills an lwp of a given process,
843 except the leader. */
844
845 static int
846 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
847 {
848 struct thread_info *thread = (struct thread_info *) entry;
849 struct lwp_info *lwp = get_thread_lwp (thread);
850 int wstat;
851 int pid = * (int *) args;
852
853 if (ptid_get_pid (entry->id) != pid)
854 return 0;
855
856 /* We avoid killing the first thread here, because of a Linux kernel (at
857 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
858 the children get a chance to be reaped, it will remain a zombie
859 forever. */
860
861 if (lwpid_of (lwp) == pid)
862 {
863 if (debug_threads)
864 fprintf (stderr, "lkop: is last of process %s\n",
865 target_pid_to_str (entry->id));
866 return 0;
867 }
868
869 do
870 {
871 linux_kill_one_lwp (lwp);
872
873 /* Make sure it died. The loop is most likely unnecessary. */
874 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
875 } while (pid > 0 && WIFSTOPPED (wstat));
876
877 return 0;
878 }
879
880 static int
881 linux_kill (int pid)
882 {
883 struct process_info *process;
884 struct lwp_info *lwp;
885 int wstat;
886 int lwpid;
887
888 process = find_process_pid (pid);
889 if (process == NULL)
890 return -1;
891
892 /* If we're killing a running inferior, make sure it is stopped
893 first, as PTRACE_KILL will not work otherwise. */
894 stop_all_lwps (0, NULL);
895
896 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
897
898 /* See the comment in linux_kill_one_lwp. We did not kill the first
899 thread in the list, so do so now. */
900 lwp = find_lwp_pid (pid_to_ptid (pid));
901
902 if (lwp == NULL)
903 {
904 if (debug_threads)
 905 fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
 906 pid);
907 }
908 else
909 {
910 if (debug_threads)
911 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
912 lwpid_of (lwp), pid);
913
914 do
915 {
916 linux_kill_one_lwp (lwp);
917
918 /* Make sure it died. The loop is most likely unnecessary. */
919 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
920 } while (lwpid > 0 && WIFSTOPPED (wstat));
921 }
922
923 the_target->mourn (process);
924
925 /* Since we presently can only stop all lwps of all processes, we
926 need to unstop lwps of other processes. */
927 unstop_all_lwps (0, NULL);
928 return 0;
929 }
930
931 static int
932 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
933 {
934 struct thread_info *thread = (struct thread_info *) entry;
935 struct lwp_info *lwp = get_thread_lwp (thread);
936 int pid = * (int *) args;
937
938 if (ptid_get_pid (entry->id) != pid)
939 return 0;
940
941 /* If this process is stopped but is expecting a SIGSTOP, then make
942 sure we take care of that now. This isn't absolutely guaranteed
943 to collect the SIGSTOP, but is fairly likely to. */
944 if (lwp->stop_expected)
945 {
946 int wstat;
947 /* Clear stop_expected, so that the SIGSTOP will be reported. */
948 lwp->stop_expected = 0;
949 linux_resume_one_lwp (lwp, 0, 0, NULL);
950 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
951 }
952
953 /* Flush any pending changes to the process's registers. */
954 regcache_invalidate_one ((struct inferior_list_entry *)
955 get_lwp_thread (lwp));
956
957 /* Finally, let it resume. */
958 if (the_low_target.prepare_to_resume != NULL)
959 the_low_target.prepare_to_resume (lwp);
960 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
961
962 delete_lwp (lwp);
963 return 0;
964 }
965
966 static int
967 linux_detach (int pid)
968 {
969 struct process_info *process;
970
971 process = find_process_pid (pid);
972 if (process == NULL)
973 return -1;
974
975 /* Stop all threads before detaching. First, ptrace requires that
 976 the thread is stopped to successfully detach. Second, thread_db
977 may need to uninstall thread event breakpoints from memory, which
978 only works with a stopped process anyway. */
979 stop_all_lwps (0, NULL);
980
981 #ifdef USE_THREAD_DB
982 thread_db_detach (process);
983 #endif
984
985 /* Stabilize threads (move out of jump pads). */
986 stabilize_threads ();
987
988 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
989
990 the_target->mourn (process);
991
992 /* Since we presently can only stop all lwps of all processes, we
993 need to unstop lwps of other processes. */
994 unstop_all_lwps (0, NULL);
995 return 0;
996 }
997
998 /* Remove all LWPs that belong to process PROC from the lwp list. */
999
1000 static int
1001 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1002 {
1003 struct lwp_info *lwp = (struct lwp_info *) entry;
1004 struct process_info *process = proc;
1005
1006 if (pid_of (lwp) == pid_of (process))
1007 delete_lwp (lwp);
1008
1009 return 0;
1010 }
1011
1012 static void
1013 linux_mourn (struct process_info *process)
1014 {
1015 struct process_info_private *priv;
1016
1017 #ifdef USE_THREAD_DB
1018 thread_db_mourn (process);
1019 #endif
1020
1021 find_inferior (&all_lwps, delete_lwp_callback, process);
1022
 1023 /* Free all private data. */
1024 priv = process->private;
1025 free (priv->arch_private);
1026 free (priv);
1027 process->private = NULL;
1028
1029 remove_process (process);
1030 }
1031
1032 static void
1033 linux_join (int pid)
1034 {
1035 int status, ret;
1036
 1037 do {
 1038 ret = my_waitpid (pid, &status, 0);
 1039 if (ret == -1 && errno == ECHILD)
 1040 break; /* STATUS is not written when waitpid fails. */
 1041 } while (ret == -1 || !(WIFEXITED (status) || WIFSIGNALED (status)));
1042 }
1043
1044 /* Return nonzero if the given thread is still alive. */
1045 static int
1046 linux_thread_alive (ptid_t ptid)
1047 {
1048 struct lwp_info *lwp = find_lwp_pid (ptid);
1049
1050 /* We assume we always know if a thread exits. If a whole process
1051 exited but we still haven't been able to report it to GDB, we'll
1052 hold on to the last lwp of the dead process. */
1053 if (lwp != NULL)
1054 return !lwp->dead;
1055 else
1056 return 0;
1057 }
1058
1059 /* Return 1 if this lwp has an interesting status pending. */
1060 static int
1061 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1062 {
1063 struct lwp_info *lwp = (struct lwp_info *) entry;
1064 ptid_t ptid = * (ptid_t *) arg;
1065 struct thread_info *thread;
1066
1067 /* Check if we're only interested in events from a specific process
1068 or its lwps. */
1069 if (!ptid_equal (minus_one_ptid, ptid)
1070 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1071 return 0;
1072
1073 thread = get_lwp_thread (lwp);
1074
1075 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1076 report any status pending the LWP may have. */
1077 if (thread->last_resume_kind == resume_stop
1078 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1079 return 0;
1080
1081 return lwp->status_pending_p;
1082 }
1083
1084 static int
1085 same_lwp (struct inferior_list_entry *entry, void *data)
1086 {
1087 ptid_t ptid = *(ptid_t *) data;
1088 int lwp;
1089
1090 if (ptid_get_lwp (ptid) != 0)
1091 lwp = ptid_get_lwp (ptid);
1092 else
1093 lwp = ptid_get_pid (ptid);
1094
1095 if (ptid_get_lwp (entry->id) == lwp)
1096 return 1;
1097
1098 return 0;
1099 }
1100
1101 struct lwp_info *
1102 find_lwp_pid (ptid_t ptid)
1103 {
1104 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1105 }
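/* Equivalence sketch (assumed): same_lwp falls back to the pid field
   when a ptid carries no lwp, so a bare pid and a fully built ptid
   resolve to the same LWP.  */
#if 0
struct lwp_info *a = find_lwp_pid (ptid_build (pid, pid, 0));
struct lwp_info *b = find_lwp_pid (pid_to_ptid (pid));
assert (a == b);
#endif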
1106
1107 static struct lwp_info *
1108 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1109 {
1110 int ret;
1111 int to_wait_for = -1;
1112 struct lwp_info *child = NULL;
1113
1114 if (debug_threads)
1115 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1116
1117 if (ptid_equal (ptid, minus_one_ptid))
1118 to_wait_for = -1; /* any child */
1119 else
1120 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1121
1122 options |= __WALL;
1123
1124 retry:
1125
1126 ret = my_waitpid (to_wait_for, wstatp, options);
1127 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1128 return NULL;
1129 else if (ret == -1)
1130 perror_with_name ("waitpid");
1131
1132 if (debug_threads
1133 && (!WIFSTOPPED (*wstatp)
 1134 || (WSTOPSIG (*wstatp) != 32 /* Skip the RT signals used */
 1135 && WSTOPSIG (*wstatp) != 33))) /* internally by glibc threads. */
1136 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1137
1138 child = find_lwp_pid (pid_to_ptid (ret));
1139
1140 /* If we didn't find a process, one of two things presumably happened:
1141 - A process we started and then detached from has exited. Ignore it.
1142 - A process we are controlling has forked and the new child's stop
1143 was reported to us by the kernel. Save its PID. */
1144 if (child == NULL && WIFSTOPPED (*wstatp))
1145 {
1146 add_pid_to_list (&stopped_pids, ret);
1147 goto retry;
1148 }
1149 else if (child == NULL)
1150 goto retry;
1151
1152 child->stopped = 1;
1153
1154 child->last_status = *wstatp;
1155
1156 /* Architecture-specific setup after inferior is running.
1157 This needs to happen after we have attached to the inferior
1158 and it is stopped for the first time, but before we access
1159 any inferior registers. */
1160 if (new_inferior)
1161 {
1162 the_low_target.arch_setup ();
1163 #ifdef HAVE_LINUX_REGSETS
1164 memset (disabled_regsets, 0, num_regsets);
1165 #endif
1166 new_inferior = 0;
1167 }
1168
1169 /* Fetch the possibly triggered data watchpoint info and store it in
1170 CHILD.
1171
1172 On some archs, like x86, that use debug registers to set
1173 watchpoints, it's possible that the way to know which watched
1174 address trapped, is to check the register that is used to select
1175 which address to watch. Problem is, between setting the
1176 watchpoint and reading back which data address trapped, the user
1177 may change the set of watchpoints, and, as a consequence, GDB
1178 changes the debug registers in the inferior. To avoid reading
1179 back a stale stopped-data-address when that happens, we cache in
1180 LP the fact that a watchpoint trapped, and the corresponding data
1181 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1182 changes the debug registers meanwhile, we have the cached data we
1183 can rely on. */
1184
1185 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1186 {
1187 if (the_low_target.stopped_by_watchpoint == NULL)
1188 {
1189 child->stopped_by_watchpoint = 0;
1190 }
1191 else
1192 {
1193 struct thread_info *saved_inferior;
1194
1195 saved_inferior = current_inferior;
1196 current_inferior = get_lwp_thread (child);
1197
1198 child->stopped_by_watchpoint
1199 = the_low_target.stopped_by_watchpoint ();
1200
1201 if (child->stopped_by_watchpoint)
1202 {
1203 if (the_low_target.stopped_data_address != NULL)
1204 child->stopped_data_address
1205 = the_low_target.stopped_data_address ();
1206 else
1207 child->stopped_data_address = 0;
1208 }
1209
1210 current_inferior = saved_inferior;
1211 }
1212 }
1213
1214 /* Store the STOP_PC, with adjustment applied. This depends on the
1215 architecture being defined already (so that CHILD has a valid
1216 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1217 not). */
1218 if (WIFSTOPPED (*wstatp))
1219 child->stop_pc = get_stop_pc (child);
1220
1221 if (debug_threads
1222 && WIFSTOPPED (*wstatp)
1223 && the_low_target.get_pc != NULL)
1224 {
1225 struct thread_info *saved_inferior = current_inferior;
1226 struct regcache *regcache;
1227 CORE_ADDR pc;
1228
1229 current_inferior = get_lwp_thread (child);
1230 regcache = get_thread_regcache (current_inferior, 1);
1231 pc = (*the_low_target.get_pc) (regcache);
1232 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1233 current_inferior = saved_inferior;
1234 }
1235
1236 return child;
1237 }
1238
1239 /* This function should only be called if the LWP got a SIGTRAP.
1240
 1241 Handle any tracepoint steps or hits. Return nonzero if a tracepoint
 1242 event was handled, zero otherwise. */
1243
1244 static int
1245 handle_tracepoints (struct lwp_info *lwp)
1246 {
1247 struct thread_info *tinfo = get_lwp_thread (lwp);
1248 int tpoint_related_event = 0;
1249
1250 /* If this tracepoint hit causes a tracing stop, we'll immediately
1251 uninsert tracepoints. To do this, we temporarily pause all
1252 threads, unpatch away, and then unpause threads. We need to make
1253 sure the unpausing doesn't resume LWP too. */
1254 lwp->suspended++;
1255
1256 /* And we need to be sure that any all-threads-stopping doesn't try
1257 to move threads out of the jump pads, as it could deadlock the
1258 inferior (LWP could be in the jump pad, maybe even holding the
1259 lock.) */
1260
1261 /* Do any necessary step collect actions. */
1262 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1263
1264 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1265
1266 /* See if we just hit a tracepoint and do its main collect
1267 actions. */
1268 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1269
1270 lwp->suspended--;
1271
1272 gdb_assert (lwp->suspended == 0);
1273 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1274
1275 if (tpoint_related_event)
1276 {
1277 if (debug_threads)
1278 fprintf (stderr, "got a tracepoint event\n");
1279 return 1;
1280 }
1281
1282 return 0;
1283 }
1284
1285 /* Convenience wrapper. Returns true if LWP is presently collecting a
1286 fast tracepoint. */
1287
1288 static int
1289 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1290 struct fast_tpoint_collect_status *status)
1291 {
1292 CORE_ADDR thread_area;
1293
1294 if (the_low_target.get_thread_area == NULL)
1295 return 0;
1296
1297 /* Get the thread area address. This is used to recognize which
1298 thread is which when tracing with the in-process agent library.
1299 We don't read anything from the address, and treat it as opaque;
1300 it's the address itself that we assume is unique per-thread. */
1301 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1302 return 0;
1303
1304 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1305 }
1306
 1307 /* The reason we resume in the caller is that we want to be able
 1308 to pass lwp->status_pending as WSTAT, and we need to clear
 1309 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1310 refuses to resume. */
1311
1312 static int
1313 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1314 {
1315 struct thread_info *saved_inferior;
1316
1317 saved_inferior = current_inferior;
1318 current_inferior = get_lwp_thread (lwp);
1319
1320 if ((wstat == NULL
1321 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1322 && supports_fast_tracepoints ()
1323 && in_process_agent_loaded ())
1324 {
1325 struct fast_tpoint_collect_status status;
1326 int r;
1327
1328 if (debug_threads)
1329 fprintf (stderr, "\
1330 Checking whether LWP %ld needs to move out of the jump pad.\n",
1331 lwpid_of (lwp));
1332
1333 r = linux_fast_tracepoint_collecting (lwp, &status);
1334
1335 if (wstat == NULL
1336 || (WSTOPSIG (*wstat) != SIGILL
1337 && WSTOPSIG (*wstat) != SIGFPE
1338 && WSTOPSIG (*wstat) != SIGSEGV
1339 && WSTOPSIG (*wstat) != SIGBUS))
1340 {
1341 lwp->collecting_fast_tracepoint = r;
1342
1343 if (r != 0)
1344 {
1345 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1346 {
1347 /* Haven't executed the original instruction yet.
1348 Set breakpoint there, and wait till it's hit,
1349 then single-step until exiting the jump pad. */
1350 lwp->exit_jump_pad_bkpt
1351 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1352 }
1353
1354 if (debug_threads)
1355 fprintf (stderr, "\
1356 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1357 lwpid_of (lwp));
1358 current_inferior = saved_inferior;
1359
1360 return 1;
1361 }
1362 }
1363 else
1364 {
1365 /* If we get a synchronous signal while collecting, *and*
1366 while executing the (relocated) original instruction,
1367 reset the PC to point at the tpoint address, before
1368 reporting to GDB. Otherwise, it's an IPA lib bug: just
1369 report the signal to GDB, and pray for the best. */
1370
1371 lwp->collecting_fast_tracepoint = 0;
1372
1373 if (r != 0
1374 && (status.adjusted_insn_addr <= lwp->stop_pc
1375 && lwp->stop_pc < status.adjusted_insn_addr_end))
1376 {
1377 siginfo_t info;
1378 struct regcache *regcache;
1379
1380 /* The si_addr on a few signals references the address
1381 of the faulting instruction. Adjust that as
1382 well. */
1383 if ((WSTOPSIG (*wstat) == SIGILL
1384 || WSTOPSIG (*wstat) == SIGFPE
1385 || WSTOPSIG (*wstat) == SIGBUS
1386 || WSTOPSIG (*wstat) == SIGSEGV)
1387 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
1388 /* Final check just to make sure we don't clobber
1389 the siginfo of non-kernel-sent signals. */
1390 && (uintptr_t) info.si_addr == lwp->stop_pc)
1391 {
1392 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1393 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
1394 }
1395
1396 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1397 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1398 lwp->stop_pc = status.tpoint_addr;
1399
1400 /* Cancel any fast tracepoint lock this thread was
1401 holding. */
1402 force_unlock_trace_buffer ();
1403 }
1404
1405 if (lwp->exit_jump_pad_bkpt != NULL)
1406 {
1407 if (debug_threads)
1408 fprintf (stderr,
1409 "Cancelling fast exit-jump-pad: removing bkpt. "
1410 "stopping all threads momentarily.\n");
1411
1412 stop_all_lwps (1, lwp);
1413 cancel_breakpoints ();
1414
1415 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1416 lwp->exit_jump_pad_bkpt = NULL;
1417
1418 unstop_all_lwps (1, lwp);
1419
1420 gdb_assert (lwp->suspended >= 0);
1421 }
1422 }
1423 }
1424
1425 if (debug_threads)
1426 fprintf (stderr, "\
1427 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1428 lwpid_of (lwp));
1429
1430 current_inferior = saved_inferior;
1431 return 0;
1432 }
1433
1434 /* Enqueue one signal in the "signals to report later when out of the
1435 jump pad" list. */
1436
1437 static void
1438 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1439 {
1440 struct pending_signals *p_sig;
1441
1442 if (debug_threads)
1443 fprintf (stderr, "\
1444 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1445
1446 if (debug_threads)
1447 {
1448 struct pending_signals *sig;
1449
1450 for (sig = lwp->pending_signals_to_report;
1451 sig != NULL;
1452 sig = sig->prev)
1453 fprintf (stderr,
1454 " Already queued %d\n",
1455 sig->signal);
1456
1457 fprintf (stderr, " (no more currently queued signals)\n");
1458 }
1459
1460 /* Don't enqueue non-RT signals if they are already in the deferred
 1461 queue. (SIGSTOP is the easiest signal to see ending up here
 1462 twice.) */
1463 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1464 {
1465 struct pending_signals *sig;
1466
1467 for (sig = lwp->pending_signals_to_report;
1468 sig != NULL;
1469 sig = sig->prev)
1470 {
1471 if (sig->signal == WSTOPSIG (*wstat))
1472 {
1473 if (debug_threads)
1474 fprintf (stderr,
1475 "Not requeuing already queued non-RT signal %d"
1476 " for LWP %ld\n",
1477 sig->signal,
1478 lwpid_of (lwp));
1479 return;
1480 }
1481 }
1482 }
1483
1484 p_sig = xmalloc (sizeof (*p_sig));
1485 p_sig->prev = lwp->pending_signals_to_report;
1486 p_sig->signal = WSTOPSIG (*wstat);
1487 memset (&p_sig->info, 0, sizeof (siginfo_t));
1488 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1489
1490 lwp->pending_signals_to_report = p_sig;
1491 }
1492
1493 /* Dequeue one signal from the "signals to report later when out of
1494 the jump pad" list. */
1495
1496 static int
1497 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1498 {
1499 if (lwp->pending_signals_to_report != NULL)
1500 {
1501 struct pending_signals **p_sig;
1502
1503 p_sig = &lwp->pending_signals_to_report;
1504 while ((*p_sig)->prev != NULL)
1505 p_sig = &(*p_sig)->prev;
1506
1507 *wstat = W_STOPCODE ((*p_sig)->signal);
1508 if ((*p_sig)->info.si_signo != 0)
1509 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1510 free (*p_sig);
1511 *p_sig = NULL;
1512
1513 if (debug_threads)
1514 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1515 WSTOPSIG (*wstat), lwpid_of (lwp));
1516
1517 if (debug_threads)
1518 {
1519 struct pending_signals *sig;
1520
1521 for (sig = lwp->pending_signals_to_report;
1522 sig != NULL;
1523 sig = sig->prev)
1524 fprintf (stderr,
1525 " Still queued %d\n",
1526 sig->signal);
1527
1528 fprintf (stderr, " (no more queued signals)\n");
1529 }
1530
1531 return 1;
1532 }
1533
1534 return 0;
1535 }
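/* Queue discipline, illustrated (ENQUEUE is a hypothetical stand-in
   for enqueue_one_deferred_signal, which really takes a wait status):
   signals are pushed at the head but drained from the tail, so they
   are re-reported in arrival order.  */
#if 0
int wstat;
ENQUEUE (lwp, SIGUSR1);
ENQUEUE (lwp, SIGUSR2);
dequeue_one_deferred_signal (lwp, &wstat);
assert (WSTOPSIG (wstat) == SIGUSR1);  /* first deferred, first out */
#endif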
1536
1537 /* Arrange for a breakpoint to be hit again later. We don't keep the
1538 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
 1539 will handle the current event; eventually we will resume this LWP,
1540 and this breakpoint will trap again. */
1541
1542 static int
1543 cancel_breakpoint (struct lwp_info *lwp)
1544 {
1545 struct thread_info *saved_inferior;
1546
1547 /* There's nothing to do if we don't support breakpoints. */
1548 if (!supports_breakpoints ())
1549 return 0;
1550
1551 /* breakpoint_at reads from current inferior. */
1552 saved_inferior = current_inferior;
1553 current_inferior = get_lwp_thread (lwp);
1554
1555 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1556 {
1557 if (debug_threads)
1558 fprintf (stderr,
1559 "CB: Push back breakpoint for %s\n",
1560 target_pid_to_str (ptid_of (lwp)));
1561
1562 /* Back up the PC if necessary. */
1563 if (the_low_target.decr_pc_after_break)
1564 {
1565 struct regcache *regcache
1566 = get_thread_regcache (current_inferior, 1);
1567 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1568 }
1569
1570 current_inferior = saved_inferior;
1571 return 1;
1572 }
1573 else
1574 {
1575 if (debug_threads)
1576 fprintf (stderr,
1577 "CB: No breakpoint found at %s for [%s]\n",
1578 paddress (lwp->stop_pc),
1579 target_pid_to_str (ptid_of (lwp)));
1580 }
1581
1582 current_inferior = saved_inferior;
1583 return 0;
1584 }
1585
1586 /* When the event-loop is doing a step-over, this points at the thread
1587 being stepped. */
1588 ptid_t step_over_bkpt;
1589
 1590 /* Wait for an event from children matching PTID. If PTID is
 1591 minus_one_ptid, wait for any child. Store the stop status through
 1592 the status pointer WSTAT. OPTIONS is passed to the waitpid call.
 1593 Return 0 if no child stop event was found and OPTIONS contains
 1594 WNOHANG. Return the PID of the stopped child otherwise. */
1595
1596 static int
1597 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1598 {
1599 struct lwp_info *event_child, *requested_child;
1600 ptid_t wait_ptid;
1601
1602 event_child = NULL;
1603 requested_child = NULL;
1604
1605 /* Check for a lwp with a pending status. */
1606
1607 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1608 {
1609 event_child = (struct lwp_info *)
1610 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1611 if (debug_threads && event_child)
1612 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1613 }
1614 else
1615 {
1616 requested_child = find_lwp_pid (ptid);
1617
1618 if (!stopping_threads
1619 && requested_child->status_pending_p
1620 && requested_child->collecting_fast_tracepoint)
1621 {
1622 enqueue_one_deferred_signal (requested_child,
1623 &requested_child->status_pending);
1624 requested_child->status_pending_p = 0;
1625 requested_child->status_pending = 0;
1626 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1627 }
1628
1629 if (requested_child->suspended
1630 && requested_child->status_pending_p)
1631 fatal ("requesting an event out of a suspended child?");
1632
1633 if (requested_child->status_pending_p)
1634 event_child = requested_child;
1635 }
1636
1637 if (event_child != NULL)
1638 {
1639 if (debug_threads)
1640 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1641 lwpid_of (event_child), event_child->status_pending);
1642 *wstat = event_child->status_pending;
1643 event_child->status_pending_p = 0;
1644 event_child->status_pending = 0;
1645 current_inferior = get_lwp_thread (event_child);
1646 return lwpid_of (event_child);
1647 }
1648
1649 if (ptid_is_pid (ptid))
1650 {
1651 /* A request to wait for a specific tgid. This is not possible
1652 with waitpid, so instead, we wait for any child, and leave
1653 children we're not interested in right now with a pending
1654 status to report later. */
1655 wait_ptid = minus_one_ptid;
1656 }
1657 else
1658 wait_ptid = ptid;
1659
1660 /* We only enter this loop if no process has a pending wait status. Thus
1661 any action taken in response to a wait status inside this loop is
1662 responding as soon as we detect the status, not after any pending
1663 events. */
1664 while (1)
1665 {
1666 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1667
1668 if ((options & WNOHANG) && event_child == NULL)
1669 {
1670 if (debug_threads)
1671 fprintf (stderr, "WNOHANG set, no event found\n");
1672 return 0;
1673 }
1674
1675 if (event_child == NULL)
1676 error ("event from unknown child");
1677
1678 if (ptid_is_pid (ptid)
1679 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1680 {
1681 if (! WIFSTOPPED (*wstat))
1682 mark_lwp_dead (event_child, *wstat);
1683 else
1684 {
1685 event_child->status_pending_p = 1;
1686 event_child->status_pending = *wstat;
1687 }
1688 continue;
1689 }
1690
1691 current_inferior = get_lwp_thread (event_child);
1692
1693 /* Check for thread exit. */
1694 if (! WIFSTOPPED (*wstat))
1695 {
1696 if (debug_threads)
1697 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1698
1699 /* If the last thread is exiting, just return. */
1700 if (last_thread_of_process_p (current_inferior))
1701 {
1702 if (debug_threads)
1703 fprintf (stderr, "LWP %ld is last lwp of process\n",
1704 lwpid_of (event_child));
1705 return lwpid_of (event_child);
1706 }
1707
1708 if (!non_stop)
1709 {
1710 current_inferior = (struct thread_info *) all_threads.head;
1711 if (debug_threads)
1712 fprintf (stderr, "Current inferior is now %ld\n",
1713 lwpid_of (get_thread_lwp (current_inferior)));
1714 }
1715 else
1716 {
1717 current_inferior = NULL;
1718 if (debug_threads)
1719 fprintf (stderr, "Current inferior is now <NULL>\n");
1720 }
1721
1722 /* If we were waiting for this particular child to do something...
1723 well, it did something. */
1724 if (requested_child != NULL)
1725 {
1726 int lwpid = lwpid_of (event_child);
1727
1728 /* Cancel the step-over operation --- the thread that
1729 started it is gone. */
1730 if (finish_step_over (event_child))
1731 unstop_all_lwps (1, event_child);
1732 delete_lwp (event_child);
1733 return lwpid;
1734 }
1735
1736 delete_lwp (event_child);
1737
1738 /* Wait for a more interesting event. */
1739 continue;
1740 }
1741
1742 if (event_child->must_set_ptrace_flags)
1743 {
1744 linux_enable_event_reporting (lwpid_of (event_child));
1745 event_child->must_set_ptrace_flags = 0;
1746 }
1747
1748 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1749 && *wstat >> 16 != 0)
1750 {
1751 handle_extended_wait (event_child, *wstat);
1752 continue;
1753 }
1754
1755 if (WIFSTOPPED (*wstat)
1756 && WSTOPSIG (*wstat) == SIGSTOP
1757 && event_child->stop_expected)
1758 {
1759 int should_stop;
1760
1761 if (debug_threads)
1762 fprintf (stderr, "Expected stop.\n");
1763 event_child->stop_expected = 0;
1764
1765 should_stop = (current_inferior->last_resume_kind == resume_stop
1766 || stopping_threads);
1767
1768 if (!should_stop)
1769 {
1770 linux_resume_one_lwp (event_child,
1771 event_child->stepping, 0, NULL);
1772 continue;
1773 }
1774 }
1775
1776 return lwpid_of (event_child);
1777 }
1778
1779 /* NOTREACHED */
1780 return 0;
1781 }
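/* Caller's-eye sketch (assumed): polling for an event without
   blocking.  A return of 0 means no stopped child yet; a positive
   return is the LWP id whose wait status landed in WSTAT.  */
#if 0
int wstat;
int lwpid = linux_wait_for_event (minus_one_ptid, &wstat, WNOHANG);
if (lwpid == 0)
  ; /* nothing ready; try again from the event loop */
#endif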
1782
 1783 /* Count the LWPs that have had events. */
1784
1785 static int
1786 count_events_callback (struct inferior_list_entry *entry, void *data)
1787 {
1788 struct lwp_info *lp = (struct lwp_info *) entry;
1789 struct thread_info *thread = get_lwp_thread (lp);
1790 int *count = data;
1791
1792 gdb_assert (count != NULL);
1793
1794 /* Count only resumed LWPs that have a SIGTRAP event pending that
1795 should be reported to GDB. */
1796 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1797 && thread->last_resume_kind != resume_stop
1798 && lp->status_pending_p
1799 && WIFSTOPPED (lp->status_pending)
1800 && WSTOPSIG (lp->status_pending) == SIGTRAP
1801 && !breakpoint_inserted_here (lp->stop_pc))
1802 (*count)++;
1803
1804 return 0;
1805 }
1806
1807 /* Select the LWP (if any) that is currently being single-stepped. */
1808
1809 static int
1810 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1811 {
1812 struct lwp_info *lp = (struct lwp_info *) entry;
1813 struct thread_info *thread = get_lwp_thread (lp);
1814
1815 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1816 && thread->last_resume_kind == resume_step
1817 && lp->status_pending_p)
1818 return 1;
1819 else
1820 return 0;
1821 }
1822
1823 /* Select the Nth LWP that has had a SIGTRAP event that should be
1824 reported to GDB. */
1825
1826 static int
1827 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1828 {
1829 struct lwp_info *lp = (struct lwp_info *) entry;
1830 struct thread_info *thread = get_lwp_thread (lp);
1831 int *selector = data;
1832
1833 gdb_assert (selector != NULL);
1834
1835 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1836 if (thread->last_resume_kind != resume_stop
1837 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1838 && lp->status_pending_p
1839 && WIFSTOPPED (lp->status_pending)
1840 && WSTOPSIG (lp->status_pending) == SIGTRAP
1841 && !breakpoint_inserted_here (lp->stop_pc))
1842 if ((*selector)-- == 0)
1843 return 1;
1844
1845 return 0;
1846 }
1847
1848 static int
1849 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1850 {
1851 struct lwp_info *lp = (struct lwp_info *) entry;
1852 struct thread_info *thread = get_lwp_thread (lp);
1853 struct lwp_info *event_lp = data;
1854
1855 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1856 if (lp == event_lp)
1857 return 0;
1858
1859 /* If a LWP other than the LWP that we're reporting an event for has
1860 hit a GDB breakpoint (as opposed to some random trap signal),
1861 then just arrange for it to hit it again later. We don't keep
1862 the SIGTRAP status and don't forward the SIGTRAP signal to the
 1863 LWP. We will handle the current event; eventually we will resume
1864 all LWPs, and this one will get its breakpoint trap again.
1865
1866 If we do not do this, then we run the risk that the user will
1867 delete or disable the breakpoint, but the LWP will have already
1868 tripped on it. */
1869
1870 if (thread->last_resume_kind != resume_stop
1871 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1872 && lp->status_pending_p
1873 && WIFSTOPPED (lp->status_pending)
1874 && WSTOPSIG (lp->status_pending) == SIGTRAP
1875 && !lp->stepping
1876 && !lp->stopped_by_watchpoint
1877 && cancel_breakpoint (lp))
1878 /* Throw away the SIGTRAP. */
1879 lp->status_pending_p = 0;
1880
1881 return 0;
1882 }
1883
1884 static void
1885 linux_cancel_breakpoints (void)
1886 {
1887 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1888 }
1889
1890 /* Select one LWP out of those that have events pending. */
1891
1892 static void
1893 select_event_lwp (struct lwp_info **orig_lp)
1894 {
1895 int num_events = 0;
1896 int random_selector;
1897 struct lwp_info *event_lp;
1898
1899 /* Give preference to any LWP that is being single-stepped. */
1900 event_lp
1901 = (struct lwp_info *) find_inferior (&all_lwps,
1902 select_singlestep_lwp_callback, NULL);
1903 if (event_lp != NULL)
1904 {
1905 if (debug_threads)
1906 fprintf (stderr,
1907 "SEL: Select single-step %s\n",
1908 target_pid_to_str (ptid_of (event_lp)));
1909 }
1910 else
1911 {
1912 /* No single-stepping LWP. Select one at random, out of those
1913 which have had SIGTRAP events. */
1914
1915 /* First see how many SIGTRAP events we have. */
1916 find_inferior (&all_lwps, count_events_callback, &num_events);
1917
1918 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1919 random_selector = (int)
1920 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1921
1922 if (debug_threads && num_events > 1)
1923 fprintf (stderr,
1924 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1925 num_events, random_selector);
1926
1927 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1928 select_event_lwp_callback,
1929 &random_selector);
1930 }
1931
1932 if (event_lp != NULL)
1933 {
1934 /* Switch the event LWP. */
1935 *orig_lp = event_lp;
1936 }
1937 }
1938
1939 /* Decrement the suspend count of an LWP. */
1940
1941 static int
1942 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1943 {
1944 struct lwp_info *lwp = (struct lwp_info *) entry;
1945
1946 /* Ignore EXCEPT. */
1947 if (lwp == except)
1948 return 0;
1949
1950 lwp->suspended--;
1951
1952 gdb_assert (lwp->suspended >= 0);
1953 return 0;
1954 }
1955
1956 /* Decrement the suspend count of all LWPs, except EXCEPT, if
1957 non-NULL. */
1958
1959 static void
1960 unsuspend_all_lwps (struct lwp_info *except)
1961 {
1962 find_inferior (&all_lwps, unsuspend_one_lwp, except);
1963 }
1964
1965 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1966 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1967 void *data);
1968 static int lwp_running (struct inferior_list_entry *entry, void *data);
1969 static ptid_t linux_wait_1 (ptid_t ptid,
1970 struct target_waitstatus *ourstatus,
1971 int target_options);
1972
1973 /* Stabilize threads (move out of jump pads).
1974
1975 If a thread is midway through a fast tracepoint collection, we
1976 need to finish the collection and move it out of the jump pad
1977 before reporting the signal.
1978
1979 This avoids recursion while collecting (when a signal arrives
1980 midway, and the signal handler itself collects), which would trash
1981 the trace buffer. In case the user set a breakpoint in a signal
1982 handler, this avoids the backtrace showing the jump pad, etc.
1983 Most importantly, there are certain things we can't do safely if
1984 threads are stopped in a jump pad (or in its callees). For
1985 example:
1986
1987 - starting a new trace run. A thread still collecting the
1988 previous run could trash the trace buffer when resumed. The trace
1989 buffer control structures would have been reset but the thread had
1990 no way to tell. The thread could even be midway through memcpy'ing
1991 to the buffer, which would mean that when resumed, it would clobber
1992 the trace buffer that had been set up for the new run.
1993
1994 - we can't rewrite/reuse the jump pads for new tracepoints
1995 safely. Say you do tstart while a thread is stopped midway through
1996 a collection. When the thread is later resumed, it finishes the
1997 collection, and returns to the jump pad, to execute the original
1998 instruction that was under the tracepoint jump at the time the
1999 older run had been started. If the jump pad had been rewritten
2000 in the meantime for something else in the new run, the thread would
2001 now execute the wrong / random instructions. */
2002
2003 static void
2004 linux_stabilize_threads (void)
2005 {
2006 struct thread_info *save_inferior;
2007 struct lwp_info *lwp_stuck;
2008
2009 lwp_stuck
2010 = (struct lwp_info *) find_inferior (&all_lwps,
2011 stuck_in_jump_pad_callback, NULL);
2012 if (lwp_stuck != NULL)
2013 {
2014 if (debug_threads)
2015 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2016 lwpid_of (lwp_stuck));
2017 return;
2018 }
2019
2020 save_inferior = current_inferior;
2021
2022 stabilizing_threads = 1;
2023
2024 /* Kick 'em all. */
2025 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2026
2027 /* Loop until all are stopped out of the jump pads. */
2028 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2029 {
2030 struct target_waitstatus ourstatus;
2031 struct lwp_info *lwp;
2032 int wstat;
2033
2034 /* Note that we go through the full wait event loop. While
2035 moving threads out of the jump pad, we need to be able to step
2036 over internal breakpoints and such. */
2037 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2038
2039 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2040 {
2041 lwp = get_thread_lwp (current_inferior);
2042
2043 /* Lock it. */
2044 lwp->suspended++;
2045
2046 if (ourstatus.value.sig != TARGET_SIGNAL_0
2047 || current_inferior->last_resume_kind == resume_stop)
2048 {
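/* Re-synthesize a wait status as if the LWP had just stopped
with this signal, and stash it to be reported later. */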
2049 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2050 enqueue_one_deferred_signal (lwp, &wstat);
2051 }
2052 }
2053 }
2054
2055 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2056
2057 stabilizing_threads = 0;
2058
2059 current_inferior = save_inferior;
2060
2061 if (debug_threads)
2062 {
2063 lwp_stuck
2064 = (struct lwp_info *) find_inferior (&all_lwps,
2065 stuck_in_jump_pad_callback, NULL);
2066 if (lwp_stuck != NULL)
2067 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2068 lwpid_of (lwp_stuck));
2069 }
2070 }
2071
2072 /* Wait for an event from an inferior; fill in OURSTATUS and return the event LWP's ptid. */
2073
2074 static ptid_t
2075 linux_wait_1 (ptid_t ptid,
2076 struct target_waitstatus *ourstatus, int target_options)
2077 {
2078 int w;
2079 struct lwp_info *event_child;
2080 int options;
2081 int pid;
2082 int step_over_finished;
2083 int bp_explains_trap;
2084 int maybe_internal_trap;
2085 int report_to_gdb;
2086 int trace_event;
2087
2088 /* Translate generic target options into linux options. */
2089 options = __WALL;
2090 if (target_options & TARGET_WNOHANG)
2091 options |= WNOHANG;
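/* __WALL waits for both clone "threads" and regular children;
WNOHANG makes the wait below return immediately (pid == 0) if no
event is pending. */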
2092
2093 retry:
2094 bp_explains_trap = 0;
2095 trace_event = 0;
2096 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2097
2098 /* If we were only supposed to resume one thread, only wait for
2099 that thread - if it's still alive. If it died, however - which
2100 can happen if we're coming from the thread death case below -
2101 then we need to make sure we restart the other threads. We could
2102 pick a thread at random or restart all; restarting all is less
2103 arbitrary. */
2104 if (!non_stop
2105 && !ptid_equal (cont_thread, null_ptid)
2106 && !ptid_equal (cont_thread, minus_one_ptid))
2107 {
2108 struct thread_info *thread;
2109
2110 thread = (struct thread_info *) find_inferior_id (&all_threads,
2111 cont_thread);
2112
2113 /* No stepping, no signal - unless one is pending already, of course. */
2114 if (thread == NULL)
2115 {
2116 struct thread_resume resume_info;
2117 resume_info.thread = minus_one_ptid;
2118 resume_info.kind = resume_continue;
2119 resume_info.sig = 0;
2120 linux_resume (&resume_info, 1);
2121 }
2122 else
2123 ptid = cont_thread;
2124 }
2125
2126 if (ptid_equal (step_over_bkpt, null_ptid))
2127 pid = linux_wait_for_event (ptid, &w, options);
2128 else
2129 {
2130 if (debug_threads)
2131 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2132 target_pid_to_str (step_over_bkpt));
2133 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2134 }
2135
2136 if (pid == 0) /* only if TARGET_WNOHANG */
2137 return null_ptid;
2138
2139 event_child = get_thread_lwp (current_inferior);
2140
2141 /* If we are waiting for a particular child, and it exited,
2142 linux_wait_for_event will return its exit status. Similarly if
2143 the last child exited. If this is not the last child, however,
2144 do not report it as exited until there is a 'thread exited' response
2145 available in the remote protocol. Instead, just wait for another event.
2146 This should be safe, because if the thread crashed we will already
2147 have reported the termination signal to GDB; that should stop any
2148 in-progress stepping operations, etc.
2149
2150 Report the exit status of the last thread to exit. This matches
2151 LinuxThreads' behavior. */
2152
2153 if (last_thread_of_process_p (current_inferior))
2154 {
2155 if (WIFEXITED (w) || WIFSIGNALED (w))
2156 {
2157 if (WIFEXITED (w))
2158 {
2159 ourstatus->kind = TARGET_WAITKIND_EXITED;
2160 ourstatus->value.integer = WEXITSTATUS (w);
2161
2162 if (debug_threads)
2163 fprintf (stderr,
2164 "\nChild exited with retcode = %x \n",
2165 WEXITSTATUS (w));
2166 }
2167 else
2168 {
2169 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2170 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2171
2172 if (debug_threads)
2173 fprintf (stderr,
2174 "\nChild terminated with signal = %x \n",
2175 WTERMSIG (w));
2176
2177 }
2178
2179 return ptid_of (event_child);
2180 }
2181 }
2182 else
2183 {
2184 if (!WIFSTOPPED (w))
2185 goto retry;
2186 }
2187
2188 /* If this event was not handled before, and is not a SIGTRAP, we
2189 report it. SIGILL and SIGSEGV are also treated as traps in case
2190 a breakpoint is inserted at the current PC. If this target does
2191 not support internal breakpoints at all, we also report the
2192 SIGTRAP without further processing; it's of no concern to us. */
2193 maybe_internal_trap
2194 = (supports_breakpoints ()
2195 && (WSTOPSIG (w) == SIGTRAP
2196 || ((WSTOPSIG (w) == SIGILL
2197 || WSTOPSIG (w) == SIGSEGV)
2198 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2199
2200 if (maybe_internal_trap)
2201 {
2202 /* Handle anything that requires bookkeeping before deciding to
2203 report the event or continue waiting. */
2204
2205 /* First check if we can explain the SIGTRAP with an internal
2206 breakpoint, or if we should possibly report the event to GDB.
2207 Do this before anything that may remove or insert a
2208 breakpoint. */
2209 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2210
2211 /* We have a SIGTRAP, possibly a step-over dance has just
2212 finished. If so, tweak the state machine accordingly,
2213 reinsert breakpoints and delete any reinsert (software
2214 single-step) breakpoints. */
2215 step_over_finished = finish_step_over (event_child);
2216
2217 /* Now invoke the callbacks of any internal breakpoints there. */
2218 check_breakpoints (event_child->stop_pc);
2219
2220 /* Handle tracepoint data collecting. This may overflow the
2221 trace buffer, and cause a tracing stop, removing
2222 breakpoints. */
2223 trace_event = handle_tracepoints (event_child);
2224
2225 if (bp_explains_trap)
2226 {
2227 /* If we stepped or ran into an internal breakpoint, we've
2228 already handled it. So next time we resume (from this
2229 PC), we should step over it. */
2230 if (debug_threads)
2231 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2232
2233 if (breakpoint_here (event_child->stop_pc))
2234 event_child->need_step_over = 1;
2235 }
2236 }
2237 else
2238 {
2239 /* We have some other signal, possibly a step-over dance was in
2240 progress, and it should be cancelled too. */
2241 step_over_finished = finish_step_over (event_child);
2242 }
2243
2244 /* We have all the data we need. Either report the event to GDB, or
2245 resume threads and keep waiting for more. */
2246
2247 /* If we're collecting a fast tracepoint, finish the collection and
2248 move out of the jump pad before delivering a signal. See
2249 linux_stabilize_threads. */
2250
2251 if (WIFSTOPPED (w)
2252 && WSTOPSIG (w) != SIGTRAP
2253 && supports_fast_tracepoints ()
2254 && in_process_agent_loaded ())
2255 {
2256 if (debug_threads)
2257 fprintf (stderr,
2258 "Got signal %d for LWP %ld. Check if we need "
2259 "to defer or adjust it.\n",
2260 WSTOPSIG (w), lwpid_of (event_child));
2261
2262 /* Allow debugging the jump pad itself. */
2263 if (current_inferior->last_resume_kind != resume_step
2264 && maybe_move_out_of_jump_pad (event_child, &w))
2265 {
2266 enqueue_one_deferred_signal (event_child, &w);
2267
2268 if (debug_threads)
2269 fprintf (stderr,
2270 "Signal %d for LWP %ld deferred (in jump pad)\n",
2271 WSTOPSIG (w), lwpid_of (event_child));
2272
2273 linux_resume_one_lwp (event_child, 0, 0, NULL);
2274 goto retry;
2275 }
2276 }
2277
2278 if (event_child->collecting_fast_tracepoint)
2279 {
2280 if (debug_threads)
2281 fprintf (stderr, "\
2282 LWP %ld was trying to move out of the jump pad (%d). \
2283 Check if we're already there.\n",
2284 lwpid_of (event_child),
2285 event_child->collecting_fast_tracepoint);
2286
2287 trace_event = 1;
2288
2289 event_child->collecting_fast_tracepoint
2290 = linux_fast_tracepoint_collecting (event_child, NULL);
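/* Nonzero means the thread is still collecting. Judging by the
resume logic in linux_resume_one_lwp, 1 means it is in the jump
pad proper and will leave via the exit-jump-pad breakpoint, while
2 means it must be single-stepped out. */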
2291
2292 if (event_child->collecting_fast_tracepoint != 1)
2293 {
2294 /* No longer need this breakpoint. */
2295 if (event_child->exit_jump_pad_bkpt != NULL)
2296 {
2297 if (debug_threads)
2298 fprintf (stderr,
2299 "No longer need exit-jump-pad bkpt; removing it."
2300 "stopping all threads momentarily.\n");
2301
2302 /* Other running threads could hit this breakpoint.
2303 We don't handle moribund locations like GDB does;
2304 instead we always pause all threads when removing
2305 breakpoints, so that any step-over or
2306 decr_pc_after_break adjustment is always taken
2307 care of while the breakpoint is still
2308 inserted. */
2309 stop_all_lwps (1, event_child);
2310 cancel_breakpoints ();
2311
2312 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2313 event_child->exit_jump_pad_bkpt = NULL;
2314
2315 unstop_all_lwps (1, event_child);
2316
2317 gdb_assert (event_child->suspended >= 0);
2318 }
2319 }
2320
2321 if (event_child->collecting_fast_tracepoint == 0)
2322 {
2323 if (debug_threads)
2324 fprintf (stderr,
2325 "fast tracepoint finished "
2326 "collecting successfully.\n");
2327
2328 /* We may have a deferred signal to report. */
2329 if (dequeue_one_deferred_signal (event_child, &w))
2330 {
2331 if (debug_threads)
2332 fprintf (stderr, "dequeued one signal.\n");
2333 }
2334 else
2335 {
2336 if (debug_threads)
2337 fprintf (stderr, "no deferred signals.\n");
2338
2339 if (stabilizing_threads)
2340 {
2341 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2342 ourstatus->value.sig = TARGET_SIGNAL_0;
2343 return ptid_of (event_child);
2344 }
2345 }
2346 }
2347 }
2348
2349 /* Check whether GDB would be interested in this event. */
2350
2351 /* If GDB is not interested in this signal, don't stop other
2352 threads, and don't report it to GDB. Just resume the inferior
2353 right away. We do this for threading-related signals as well as
2354 any that GDB specifically requested we ignore. But never ignore
2355 SIGSTOP if we sent it ourselves, and do not ignore signals when
2356 stepping - they may require special handling to skip the signal
2357 handler. */
2358 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2359 thread library? */
2360 if (WIFSTOPPED (w)
2361 && current_inferior->last_resume_kind != resume_step
2362 && (
2363 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2364 (current_process ()->private->thread_db != NULL
2365 && (WSTOPSIG (w) == __SIGRTMIN
2366 || WSTOPSIG (w) == __SIGRTMIN + 1))
2367 ||
2368 #endif
2369 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2370 && !(WSTOPSIG (w) == SIGSTOP
2371 && current_inferior->last_resume_kind == resume_stop))))
2372 {
2373 siginfo_t info, *info_p;
2374
2375 if (debug_threads)
2376 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2377 WSTOPSIG (w), lwpid_of (event_child));
2378
2379 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2380 info_p = &info;
2381 else
2382 info_p = NULL;
2383 linux_resume_one_lwp (event_child, event_child->stepping,
2384 WSTOPSIG (w), info_p);
2385 goto retry;
2386 }
2387
2388 /* If GDB wanted this thread to single step, we always want to
2389 report the SIGTRAP, and let GDB handle it. Watchpoints should
2390 always be reported. So should signals we can't explain. A
2391 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2392 may not support Z0 breakpoints. If we do, we'll be able to handle
2393 GDB breakpoints on top of internal breakpoints, by handling the
2394 internal breakpoint and still reporting the event to GDB. If we
2395 don't, we're out of luck; GDB won't see the breakpoint hit. */
2396 report_to_gdb = (!maybe_internal_trap
2397 || current_inferior->last_resume_kind == resume_step
2398 || event_child->stopped_by_watchpoint
2399 || (!step_over_finished
2400 && !bp_explains_trap && !trace_event)
2401 || gdb_breakpoint_here (event_child->stop_pc));
2402
2403 /* We found no reason GDB would want us to stop. We either hit one
2404 of our own breakpoints, or finished an internal step GDB
2405 shouldn't know about. */
2406 if (!report_to_gdb)
2407 {
2408 if (debug_threads)
2409 {
2410 if (bp_explains_trap)
2411 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2412 if (step_over_finished)
2413 fprintf (stderr, "Step-over finished.\n");
2414 if (trace_event)
2415 fprintf (stderr, "Tracepoint event.\n");
2416 }
2417
2418 /* We're not reporting this breakpoint to GDB, so apply the
2419 decr_pc_after_break adjustment to the inferior's regcache
2420 ourselves. */
2421
2422 if (the_low_target.set_pc != NULL)
2423 {
2424 struct regcache *regcache
2425 = get_thread_regcache (get_lwp_thread (event_child), 1);
2426 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2427 }
2428
2429 /* We may have finished stepping over a breakpoint. If so,
2430 we've stopped and suspended all LWPs momentarily except the
2431 stepping one. This is where we resume them all again. We're
2432 going to keep waiting, so use proceed, which handles stepping
2433 over the next breakpoint. */
2434 if (debug_threads)
2435 fprintf (stderr, "proceeding all threads.\n");
2436
2437 if (step_over_finished)
2438 unsuspend_all_lwps (event_child);
2439
2440 proceed_all_lwps ();
2441 goto retry;
2442 }
2443
2444 if (debug_threads)
2445 {
2446 if (current_inferior->last_resume_kind == resume_step)
2447 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2448 if (event_child->stopped_by_watchpoint)
2449 fprintf (stderr, "Stopped by watchpoint.\n");
2450 if (gdb_breakpoint_here (event_child->stop_pc))
2451 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2453 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2454 }
2455
2456 /* Alright, we're going to report a stop. */
2457
2458 if (!non_stop && !stabilizing_threads)
2459 {
2460 /* In all-stop, stop all threads. */
2461 stop_all_lwps (0, NULL);
2462
2463 /* If we're not waiting for a specific LWP, choose an event LWP
2464 from among those that have had events. Giving equal priority
2465 to all LWPs that have had events helps prevent
2466 starvation. */
2467 if (ptid_equal (ptid, minus_one_ptid))
2468 {
2469 event_child->status_pending_p = 1;
2470 event_child->status_pending = w;
2471
2472 select_event_lwp (&event_child);
2473
2474 event_child->status_pending_p = 0;
2475 w = event_child->status_pending;
2476 }
2477
2478 /* Now that we've selected our final event LWP, cancel any
2479 breakpoints in other LWPs that have hit a GDB breakpoint.
2480 See the comment in cancel_breakpoints_callback to find out
2481 why. */
2482 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2483
2484 /* Stabilize threads (move out of jump pads). */
2485 stabilize_threads ();
2486 }
2487 else
2488 {
2489 /* If we just finished a step-over, then all threads had been
2490 momentarily paused. In all-stop, that's fine, we want
2491 threads stopped by now anyway. In non-stop, we need to
2492 re-resume threads that GDB wanted to be running. */
2493 if (step_over_finished)
2494 unstop_all_lwps (1, event_child);
2495 }
2496
2497 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2498
2499 if (current_inferior->last_resume_kind == resume_stop
2500 && WSTOPSIG (w) == SIGSTOP)
2501 {
2502 /* This thread was requested to stop by GDB with vCont;t, and
2503 it stopped cleanly, so report it as stopped with SIG0. The
2504 use of SIGSTOP is an implementation detail. */
2505 ourstatus->value.sig = TARGET_SIGNAL_0;
2506 }
2507 else if (current_inferior->last_resume_kind == resume_stop
2508 && WSTOPSIG (w) != SIGSTOP)
2509 {
2510 /* This thread was requested to stop by GDB with vCont;t, but
2511 it stopped for some other reason; report that signal. */
2512 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2513 }
2514 else
2515 {
2516 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2517 }
2518
2519 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2520
2521 if (debug_threads)
2522 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2523 target_pid_to_str (ptid_of (event_child)),
2524 ourstatus->kind,
2525 ourstatus->value.sig);
2526
2527 return ptid_of (event_child);
2528 }
2529
2530 /* Get rid of any pending event in the pipe. */
2531 static void
2532 async_file_flush (void)
2533 {
2534 int ret;
2535 char buf;
2536
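/* Drain the pipe one byte at a time. Assuming the event pipe was
created non-blocking, the loop ends when read returns -1 with
EAGAIN (pipe empty); EINTR just retries. */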
2537 do
2538 ret = read (linux_event_pipe[0], &buf, 1);
2539 while (ret >= 0 || (ret == -1 && errno == EINTR));
2540 }
2541
2542 /* Put something in the pipe, so the event loop wakes up. */
2543 static void
2544 async_file_mark (void)
2545 {
2546 int ret;
2547
2548 async_file_flush ();
2549
2550 do
2551 ret = write (linux_event_pipe[1], "+", 1);
2552 while (ret == 0 || (ret == -1 && errno == EINTR));
2553
2554 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2555 be awakened anyway. */
2556 }
2557
2558 static ptid_t
2559 linux_wait (ptid_t ptid,
2560 struct target_waitstatus *ourstatus, int target_options)
2561 {
2562 ptid_t event_ptid;
2563
2564 if (debug_threads)
2565 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2566
2567 /* Flush the async file first. */
2568 if (target_is_async_p ())
2569 async_file_flush ();
2570
2571 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2572
2573 /* If at least one stop was reported, there may be more. A single
2574 SIGCHLD can signal more than one child stop. */
2575 if (target_is_async_p ()
2576 && (target_options & TARGET_WNOHANG) != 0
2577 && !ptid_equal (event_ptid, null_ptid))
2578 async_file_mark ();
2579
2580 return event_ptid;
2581 }
2582
2583 /* Send a signal to an LWP. */
2584
2585 static int
2586 kill_lwp (unsigned long lwpid, int signo)
2587 {
2588 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2589 fails, then we are not using nptl threads and we should be using kill. */
2590
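/* tkill directs the signal at a specific thread (LWP), whereas
kill targets the whole process; the static flag below caches an
ENOSYS result so the syscall is only probed once. */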
2591 #ifdef __NR_tkill
2592 {
2593 static int tkill_failed;
2594
2595 if (!tkill_failed)
2596 {
2597 int ret;
2598
2599 errno = 0;
2600 ret = syscall (__NR_tkill, lwpid, signo);
2601 if (errno != ENOSYS)
2602 return ret;
2603 tkill_failed = 1;
2604 }
2605 }
2606 #endif
2607
2608 return kill (lwpid, signo);
2609 }
2610
2611 void
2612 linux_stop_lwp (struct lwp_info *lwp)
2613 {
2614 send_sigstop (lwp);
2615 }
2616
2617 static void
2618 send_sigstop (struct lwp_info *lwp)
2619 {
2620 int pid;
2621
2622 pid = lwpid_of (lwp);
2623
2624 /* If we already have a pending stop signal for this LWP, don't
2625 send another. */
2626 if (lwp->stop_expected)
2627 {
2628 if (debug_threads)
2629 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2630
2631 return;
2632 }
2633
2634 if (debug_threads)
2635 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2636
2637 lwp->stop_expected = 1;
2638 kill_lwp (pid, SIGSTOP);
2639 }
2640
2641 static int
2642 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2643 {
2644 struct lwp_info *lwp = (struct lwp_info *) entry;
2645
2646 /* Ignore EXCEPT. */
2647 if (lwp == except)
2648 return 0;
2649
2650 if (lwp->stopped)
2651 return 0;
2652
2653 send_sigstop (lwp);
2654 return 0;
2655 }
2656
2657 /* Increment the suspend count of an LWP, and stop it, if not stopped
2658 yet. */
2659 static int
2660 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2661 void *except)
2662 {
2663 struct lwp_info *lwp = (struct lwp_info *) entry;
2664
2665 /* Ignore EXCEPT. */
2666 if (lwp == except)
2667 return 0;
2668
2669 lwp->suspended++;
2670
2671 return send_sigstop_callback (entry, except);
2672 }
2673
2674 static void
2675 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2676 {
2677 /* It's dead, really. */
2678 lwp->dead = 1;
2679
2680 /* Store the exit status for later. */
2681 lwp->status_pending_p = 1;
2682 lwp->status_pending = wstat;
2683
2684 /* Prevent trying to stop it. */
2685 lwp->stopped = 1;
2686
2687 /* No further stops are expected from a dead lwp. */
2688 lwp->stop_expected = 0;
2689 }
2690
2691 static void
2692 wait_for_sigstop (struct inferior_list_entry *entry)
2693 {
2694 struct lwp_info *lwp = (struct lwp_info *) entry;
2695 struct thread_info *saved_inferior;
2696 int wstat;
2697 ptid_t saved_tid;
2698 ptid_t ptid;
2699 int pid;
2700
2701 if (lwp->stopped)
2702 {
2703 if (debug_threads)
2704 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2705 lwpid_of (lwp));
2706 return;
2707 }
2708
2709 saved_inferior = current_inferior;
2710 if (saved_inferior != NULL)
2711 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2712 else
2713 saved_tid = null_ptid; /* avoid bogus unused warning */
2714
2715 ptid = lwp->head.id;
2716
2717 if (debug_threads)
2718 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2719
2720 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2721
2722 /* If we stopped with a non-SIGSTOP signal, save it for later
2723 and record the pending SIGSTOP. If the process exited, just
2724 return. */
2725 if (WIFSTOPPED (wstat))
2726 {
2727 if (debug_threads)
2728 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2729 lwpid_of (lwp), WSTOPSIG (wstat));
2730
2731 if (WSTOPSIG (wstat) != SIGSTOP)
2732 {
2733 if (debug_threads)
2734 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2735 lwpid_of (lwp), wstat);
2736
2737 lwp->status_pending_p = 1;
2738 lwp->status_pending = wstat;
2739 }
2740 }
2741 else
2742 {
2743 if (debug_threads)
2744 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2745
2746 lwp = find_lwp_pid (pid_to_ptid (pid));
2747 if (lwp)
2748 {
2749 /* Leave this status pending for the next time we're able to
2750 report it. In the meantime, we'll report this lwp as
2751 dead to GDB, so GDB doesn't try to read registers and
2752 memory from it. This can only happen if this was the
2753 last thread of the process; otherwise, PID is removed
2754 from the thread tables before linux_wait_for_event
2755 returns. */
2756 mark_lwp_dead (lwp, wstat);
2757 }
2758 }
2759
2760 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2761 current_inferior = saved_inferior;
2762 else
2763 {
2764 if (debug_threads)
2765 fprintf (stderr, "Previously current thread died.\n");
2766
2767 if (non_stop)
2768 {
2769 /* We can't change the current inferior behind GDB's back,
2770 otherwise, a subsequent command may apply to the wrong
2771 process. */
2772 current_inferior = NULL;
2773 }
2774 else
2775 {
2776 /* Set a valid thread as current. */
2777 set_desired_inferior (0);
2778 }
2779 }
2780 }
2781
2782 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2783 move it out, because we need to report the stop event to GDB. For
2784 example, if the user puts a breakpoint in the jump pad, it's
2785 because she wants to debug it. */
2786
2787 static int
2788 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2789 {
2790 struct lwp_info *lwp = (struct lwp_info *) entry;
2791 struct thread_info *thread = get_lwp_thread (lwp);
2792
2793 gdb_assert (lwp->suspended == 0);
2794 gdb_assert (lwp->stopped);
2795
2796 /* Allow debugging the jump pad, gdb_collect, etc. */
2797 return (supports_fast_tracepoints ()
2798 && in_process_agent_loaded ()
2799 && (gdb_breakpoint_here (lwp->stop_pc)
2800 || lwp->stopped_by_watchpoint
2801 || thread->last_resume_kind == resume_step)
2802 && linux_fast_tracepoint_collecting (lwp, NULL));
2803 }
2804
2805 static void
2806 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2807 {
2808 struct lwp_info *lwp = (struct lwp_info *) entry;
2809 struct thread_info *thread = get_lwp_thread (lwp);
2810 int *wstat;
2811
2812 gdb_assert (lwp->suspended == 0);
2813 gdb_assert (lwp->stopped);
2814
2815 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2816
2817 /* Allow debugging the jump pad, gdb_collect, etc. */
2818 if (!gdb_breakpoint_here (lwp->stop_pc)
2819 && !lwp->stopped_by_watchpoint
2820 && thread->last_resume_kind != resume_step
2821 && maybe_move_out_of_jump_pad (lwp, wstat))
2822 {
2823 if (debug_threads)
2824 fprintf (stderr,
2825 "LWP %ld needs stabilizing (in jump pad)\n",
2826 lwpid_of (lwp));
2827
2828 if (wstat)
2829 {
2830 lwp->status_pending_p = 0;
2831 enqueue_one_deferred_signal (lwp, wstat);
2832
2833 if (debug_threads)
2834 fprintf (stderr,
2835 "Signal %d for LWP %ld deferred "
2836 "(in jump pad)\n",
2837 WSTOPSIG (*wstat), lwpid_of (lwp));
2838 }
2839
2840 linux_resume_one_lwp (lwp, 0, 0, NULL);
2841 }
2842 else
2843 lwp->suspended++;
2844 }
2845
2846 static int
2847 lwp_running (struct inferior_list_entry *entry, void *data)
2848 {
2849 struct lwp_info *lwp = (struct lwp_info *) entry;
2850
2851 if (lwp->dead)
2852 return 0;
2853 if (lwp->stopped)
2854 return 0;
2855 return 1;
2856 }
2857
2858 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2859 If SUSPEND, then also increase the suspend count of every LWP,
2860 except EXCEPT. */
2861
2862 static void
2863 stop_all_lwps (int suspend, struct lwp_info *except)
2864 {
2865 stopping_threads = 1;
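/* Two phases: first ask every LWP to stop (bumping its suspend
count if SUSPEND), then synchronously collect each LWP's SIGSTOP
in wait_for_sigstop, stashing any other event it reports as a
pending status. */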
2866
2867 if (suspend)
2868 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2869 else
2870 find_inferior (&all_lwps, send_sigstop_callback, except);
2871 for_each_inferior (&all_lwps, wait_for_sigstop);
2872 stopping_threads = 0;
2873 }
2874
2875 /* Resume execution of the inferior process.
2876 If STEP is nonzero, single-step it.
2877 If SIGNAL is nonzero, give it that signal. */
2878
2879 static void
2880 linux_resume_one_lwp (struct lwp_info *lwp,
2881 int step, int signal, siginfo_t *info)
2882 {
2883 struct thread_info *saved_inferior;
2884 int fast_tp_collecting;
2885
2886 if (lwp->stopped == 0)
2887 return;
2888
2889 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2890
2891 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2892
2893 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2894 user used the "jump" command, or "set $pc = foo"). */
2895 if (lwp->stop_pc != get_pc (lwp))
2896 {
2897 /* Collecting 'while-stepping' actions doesn't make sense
2898 anymore. */
2899 release_while_stepping_state_list (get_lwp_thread (lwp));
2900 }
2901
2902 /* If we have pending signals or status, and a new signal, enqueue the
2903 signal. Also enqueue the signal if we are waiting to reinsert a
2904 breakpoint; it will be picked up again below. */
2905 if (signal != 0
2906 && (lwp->status_pending_p
2907 || lwp->pending_signals != NULL
2908 || lwp->bp_reinsert != 0
2909 || fast_tp_collecting))
2910 {
2911 struct pending_signals *p_sig;
2912 p_sig = xmalloc (sizeof (*p_sig));
2913 p_sig->prev = lwp->pending_signals;
2914 p_sig->signal = signal;
2915 if (info == NULL)
2916 memset (&p_sig->info, 0, sizeof (siginfo_t));
2917 else
2918 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2919 lwp->pending_signals = p_sig;
2920 }
2921
2922 if (lwp->status_pending_p)
2923 {
2924 if (debug_threads)
2925 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2926 " has pending status\n",
2927 lwpid_of (lwp), step ? "step" : "continue", signal,
2928 lwp->stop_expected ? "expected" : "not expected");
2929 return;
2930 }
2931
2932 saved_inferior = current_inferior;
2933 current_inferior = get_lwp_thread (lwp);
2934
2935 if (debug_threads)
2936 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2937 lwpid_of (lwp), step ? "step" : "continue", signal,
2938 lwp->stop_expected ? "expected" : "not expected");
2939
2940 /* This bit needs some thinking about. If we get a signal that
2941 we must report while a single-step reinsert is still pending,
2942 we often end up resuming the thread. It might be better to
2943 (ew) allow a stack of pending events; then we could be sure that
2944 the reinsert happened right away and not lose any signals.
2945
2946 Making this stack would also shrink the window in which breakpoints are
2947 uninserted (see comment in linux_wait_for_lwp) but not enough for
2948 complete correctness, so it won't solve that problem. It may be
2949 worthwhile just to solve this one, however. */
2950 if (lwp->bp_reinsert != 0)
2951 {
2952 if (debug_threads)
2953 fprintf (stderr, " pending reinsert at 0x%s\n",
2954 paddress (lwp->bp_reinsert));
2955
2956 if (can_hardware_single_step ())
2957 {
2958 if (fast_tp_collecting == 0)
2959 {
2960 if (step == 0)
2961 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2962 if (lwp->suspended)
2963 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2964 lwp->suspended);
2965 }
2966
2967 step = 1;
2968 }
2969
2970 /* Postpone any pending signal. It was enqueued above. */
2971 signal = 0;
2972 }
2973
2974 if (fast_tp_collecting == 1)
2975 {
2976 if (debug_threads)
2977 fprintf (stderr, "\
2978 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2979 lwpid_of (lwp));
2980
2981 /* Postpone any pending signal. It was enqueued above. */
2982 signal = 0;
2983 }
2984 else if (fast_tp_collecting == 2)
2985 {
2986 if (debug_threads)
2987 fprintf (stderr, "\
2988 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2989 lwpid_of (lwp));
2990
2991 if (can_hardware_single_step ())
2992 step = 1;
2993 else
2994 fatal ("moving out of jump pad single-stepping"
2995 " not implemented on this target");
2996
2997 /* Postpone any pending signal. It was enqueued above. */
2998 signal = 0;
2999 }
3000
3001 /* If we have while-stepping actions in this thread, set it stepping.
3002 If we have a signal to deliver, it may or may not be set to
3003 SIG_IGN; we don't know. Assume it isn't, and allow collecting
3004 while-stepping into a signal handler. A possible smart thing to
3005 do would be to set an internal breakpoint at the signal return
3006 address, continue, and carry on catching this while-stepping
3007 action only when that breakpoint is hit. A future
3008 enhancement. */
3009 if (get_lwp_thread (lwp)->while_stepping != NULL
3010 && can_hardware_single_step ())
3011 {
3012 if (debug_threads)
3013 fprintf (stderr,
3014 "lwp %ld has a while-stepping action -> forcing step.\n",
3015 lwpid_of (lwp));
3016 step = 1;
3017 }
3018
3019 if (debug_threads && the_low_target.get_pc != NULL)
3020 {
3021 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3022 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3023 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3024 }
3025
3026 /* If we have pending signals, consume one unless we are trying to
3027 reinsert a breakpoint or we're trying to finish a fast tracepoint
3028 collect. */
3029 if (lwp->pending_signals != NULL
3030 && lwp->bp_reinsert == 0
3031 && fast_tp_collecting == 0)
3032 {
3033 struct pending_signals **p_sig;
3034
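/* Signals are pushed at the head of the pending list, so walk to
the tail to deliver the oldest one first (FIFO order). */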
3035 p_sig = &lwp->pending_signals;
3036 while ((*p_sig)->prev != NULL)
3037 p_sig = &(*p_sig)->prev;
3038
3039 signal = (*p_sig)->signal;
3040 if ((*p_sig)->info.si_signo != 0)
3041 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3042
3043 free (*p_sig);
3044 *p_sig = NULL;
3045 }
3046
3047 if (the_low_target.prepare_to_resume != NULL)
3048 the_low_target.prepare_to_resume (lwp);
3049
3050 regcache_invalidate_one ((struct inferior_list_entry *)
3051 get_lwp_thread (lwp));
3052 errno = 0;
3053 lwp->stopped = 0;
3054 lwp->stopped_by_watchpoint = 0;
3055 lwp->stepping = step;
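/* The ptrace data argument is the signal to deliver on resume;
zero means none. */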
3056 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3057 /* Coerce to a uintptr_t first to avoid potential gcc warning
3058 of coercing an 8 byte integer to a 4 byte pointer. */
3059 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3060
3061 current_inferior = saved_inferior;
3062 if (errno)
3063 {
3064 /* ESRCH from ptrace either means that the thread was already
3065 running (an error) or that it is gone (a race condition). If
3066 it's gone, we will get a notification the next time we wait,
3067 so we can ignore the error. We could differentiate these
3068 two, but it's tricky without waiting; the thread still exists
3069 as a zombie, so sending it signal 0 would succeed. So just
3070 ignore ESRCH. */
3071 if (errno == ESRCH)
3072 return;
3073
3074 perror_with_name ("ptrace");
3075 }
3076 }
3077
3078 struct thread_resume_array
3079 {
3080 struct thread_resume *resume;
3081 size_t n;
3082 };
3083
3084 /* This function is called once per thread. We look up the thread
3085 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3086 resume request.
3087
3088 This algorithm is O(threads * resume elements), but resume elements
3089 is small (and will remain small at least until GDB supports thread
3090 suspension). */
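/* For example (illustrative values), a GDB "vCont;s:p1.2;c" packet
would typically arrive here as a two-element array,
{ { ptid 1.2, resume_step }, { minus_one_ptid, resume_continue } };
the loop below gives each thread the first element that matches
it, so thread 1.2 steps and every other thread continues. */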
3091 static int
3092 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3093 {
3094 struct lwp_info *lwp;
3095 struct thread_info *thread;
3096 int ndx;
3097 struct thread_resume_array *r;
3098
3099 thread = (struct thread_info *) entry;
3100 lwp = get_thread_lwp (thread);
3101 r = arg;
3102
3103 for (ndx = 0; ndx < r->n; ndx++)
3104 {
3105 ptid_t ptid = r->resume[ndx].thread;
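/* A request matches this thread if it is the wildcard (-1), an
exact ptid match, or a process-wide request (a pid-only ptid, or
one with an lwp field of -1) naming this thread's process. */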
3106 if (ptid_equal (ptid, minus_one_ptid)
3107 || ptid_equal (ptid, entry->id)
3108 || (ptid_is_pid (ptid)
3109 && (ptid_get_pid (ptid) == pid_of (lwp)))
3110 || (ptid_get_lwp (ptid) == -1
3111 && (ptid_get_pid (ptid) == pid_of (lwp))))
3112 {
3113 if (r->resume[ndx].kind == resume_stop
3114 && thread->last_resume_kind == resume_stop)
3115 {
3116 if (debug_threads)
3117 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3118 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3119 ? "stopped"
3120 : "stopping",
3121 lwpid_of (lwp));
3122
3123 continue;
3124 }
3125
3126 lwp->resume = &r->resume[ndx];
3127 thread->last_resume_kind = lwp->resume->kind;
3128
3129 /* If we had a deferred signal to report, dequeue one now.
3130 This can happen if the LWP gets more than one signal while
3131 trying to get out of a jump pad. */
3132 if (lwp->stopped
3133 && !lwp->status_pending_p
3134 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3135 {
3136 lwp->status_pending_p = 1;
3137
3138 if (debug_threads)
3139 fprintf (stderr,
3140 "Dequeueing deferred signal %d for LWP %ld, "
3141 "leaving status pending.\n",
3142 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3143 }
3144
3145 return 0;
3146 }
3147 }
3148
3149 /* No resume action for this thread. */
3150 lwp->resume = NULL;
3151
3152 return 0;
3153 }
3154
3155
3156 /* Set *FLAG_P if this lwp has an interesting status pending. */
3157 static int
3158 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3159 {
3160 struct lwp_info *lwp = (struct lwp_info *) entry;
3161
3162 /* LWPs which will not be resumed are not interesting, because
3163 we might not wait for them next time through linux_wait. */
3164 if (lwp->resume == NULL)
3165 return 0;
3166
3167 if (lwp->status_pending_p)
3168 * (int *) flag_p = 1;
3169
3170 return 0;
3171 }
3172
3173 /* Return 1 if this lwp that GDB wants running is stopped at an
3174 internal breakpoint that we need to step over. It assumes that any
3175 required STOP_PC adjustment has already been propagated to the
3176 inferior's regcache. */
3177
3178 static int
3179 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3180 {
3181 struct lwp_info *lwp = (struct lwp_info *) entry;
3182 struct thread_info *thread;
3183 struct thread_info *saved_inferior;
3184 CORE_ADDR pc;
3185
3186 /* LWPs which will not be resumed are not interesting, because we
3187 might not wait for them next time through linux_wait. */
3188
3189 if (!lwp->stopped)
3190 {
3191 if (debug_threads)
3192 fprintf (stderr,
3193 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3194 lwpid_of (lwp));
3195 return 0;
3196 }
3197
3198 thread = get_lwp_thread (lwp);
3199
3200 if (thread->last_resume_kind == resume_stop)
3201 {
3202 if (debug_threads)
3203 fprintf (stderr,
3204 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3205 lwpid_of (lwp));
3206 return 0;
3207 }
3208
3209 gdb_assert (lwp->suspended >= 0);
3210
3211 if (lwp->suspended)
3212 {
3213 if (debug_threads)
3214 fprintf (stderr,
3215 "Need step over [LWP %ld]? Ignoring, suspended\n",
3216 lwpid_of (lwp));
3217 return 0;
3218 }
3219
3220 if (!lwp->need_step_over)
3221 {
3222 if (debug_threads)
3223 fprintf (stderr,
3224 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3225 }
3226
3227 if (lwp->status_pending_p)
3228 {
3229 if (debug_threads)
3230 fprintf (stderr,
3231 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3232 lwpid_of (lwp));
3233 return 0;
3234 }
3235
3236 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3237 or we have. */
3238 pc = get_pc (lwp);
3239
3240 /* If the PC has changed since we stopped, then don't do anything,
3241 and let the breakpoint/tracepoint be hit. This happens if, for
3242 instance, GDB handled the decr_pc_after_break subtraction itself,
3243 GDB is OOL stepping this thread, or the user has issued a "jump"
3244 command, or poked the thread's registers herself. */
3245 if (pc != lwp->stop_pc)
3246 {
3247 if (debug_threads)
3248 fprintf (stderr,
3249 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3250 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3251 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3252
3253 lwp->need_step_over = 0;
3254 return 0;
3255 }
3256
3257 saved_inferior = current_inferior;
3258 current_inferior = thread;
3259
3260 /* We can only step over breakpoints we know about. */
3261 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3262 {
3263 /* Don't step over a breakpoint that GDB expects to hit
3264 though. */
3265 if (gdb_breakpoint_here (pc))
3266 {
3267 if (debug_threads)
3268 fprintf (stderr,
3269 "Need step over [LWP %ld]? yes, but found"
3270 " GDB breakpoint at 0x%s; skipping step over\n",
3271 lwpid_of (lwp), paddress (pc));
3272
3273 current_inferior = saved_inferior;
3274 return 0;
3275 }
3276 else
3277 {
3278 if (debug_threads)
3279 fprintf (stderr,
3280 "Need step over [LWP %ld]? yes, "
3281 "found breakpoint at 0x%s\n",
3282 lwpid_of (lwp), paddress (pc));
3283
3284 /* We've found an lwp that needs stepping over --- return 1 so
3285 that find_inferior stops looking. */
3286 current_inferior = saved_inferior;
3287
3288 /* If the step over is cancelled, this is set again. */
3289 lwp->need_step_over = 0;
3290 return 1;
3291 }
3292 }
3293
3294 current_inferior = saved_inferior;
3295
3296 if (debug_threads)
3297 fprintf (stderr,
3298 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3299 lwpid_of (lwp), paddress (pc));
3300
3301 return 0;
3302 }
3303
3304 /* Start a step-over operation on LWP. When LWP stops at a
3305 breakpoint, to make progress, we need to move the breakpoint out
3306 of the way. If we let other threads run while we do that, they may
3307 pass by the breakpoint location and miss hitting it. To avoid
3308 that, a step-over momentarily stops all threads while LWP is
3309 single-stepped with the breakpoint temporarily uninserted from
3310 the inferior. When the single-step finishes, we reinsert the
3311 breakpoint, and let all threads that are supposed to be running
3312 run again.
3313
3314 On targets that don't support hardware single-step, we don't
3315 currently support full software single-stepping. Instead, we only
3316 support stepping over the thread event breakpoint, by asking the
3317 low target where to place a reinsert breakpoint. Since this
3318 routine assumes the breakpoint being stepped over is a thread event
3319 breakpoint, it usually assumes the return address of the current
3320 function is a good enough place to set the reinsert breakpoint. */
3321
3322 static int
3323 start_step_over (struct lwp_info *lwp)
3324 {
3325 struct thread_info *saved_inferior;
3326 CORE_ADDR pc;
3327 int step;
3328
3329 if (debug_threads)
3330 fprintf (stderr,
3331 "Starting step-over on LWP %ld. Stopping all threads\n",
3332 lwpid_of (lwp));
3333
3334 stop_all_lwps (1, lwp);
3335 gdb_assert (lwp->suspended == 0);
3336
3337 if (debug_threads)
3338 fprintf (stderr, "Done stopping all threads for step-over.\n");
3339
3340 /* Note, we should always reach here with an already adjusted PC,
3341 either by GDB (if we're resuming due to GDB's request), or by our
3342 caller, if we just finished handling an internal breakpoint GDB
3343 shouldn't care about. */
3344 pc = get_pc (lwp);
3345
3346 saved_inferior = current_inferior;
3347 current_inferior = get_lwp_thread (lwp);
3348
3349 lwp->bp_reinsert = pc;
3350 uninsert_breakpoints_at (pc);
3351 uninsert_fast_tracepoint_jumps_at (pc);
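/* bp_reinsert records the address whose breakpoints and fast
tracepoint jumps we just pulled out; finish_step_over uses it to
put them back once this LWP's single-step has finished. */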
3352
3353 if (can_hardware_single_step ())
3354 {
3355 step = 1;
3356 }
3357 else
3358 {
3359 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3360 set_reinsert_breakpoint (raddr);
3361 step = 0;
3362 }
3363
3364 current_inferior = saved_inferior;
3365
3366 linux_resume_one_lwp (lwp, step, 0, NULL);
3367
3368 /* Require next event from this LWP. */
3369 step_over_bkpt = lwp->head.id;
3370 return 1;
3371 }
3372
3373 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3374 start_step_over, if still there, and delete any reinsert
3375 breakpoints we've set, on targets without hardware single-step. */
3376
3377 static int
3378 finish_step_over (struct lwp_info *lwp)
3379 {
3380 if (lwp->bp_reinsert != 0)
3381 {
3382 if (debug_threads)
3383 fprintf (stderr, "Finished step over.\n");
3384
3385 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3386 may be no breakpoint to reinsert there by now. */
3387 reinsert_breakpoints_at (lwp->bp_reinsert);
3388 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3389
3390 lwp->bp_reinsert = 0;
3391
3392 /* Delete any software-single-step reinsert breakpoints. No
3393 longer needed. We don't have to worry about other threads
3394 hitting this trap, and later not being able to explain it,
3395 because we were stepping over a breakpoint, and we hold all
3396 threads but LWP stopped while doing that. */
3397 if (!can_hardware_single_step ())
3398 delete_reinsert_breakpoints ();
3399
3400 step_over_bkpt = null_ptid;
3401 return 1;
3402 }
3403 else
3404 return 0;
3405 }
3406
3407 /* This function is called once per thread. We check the thread's resume
3408 request, which will tell us whether to resume, step, or leave the thread
3409 stopped; and what signal, if any, it should be sent.
3410
3411 For threads which we aren't explicitly told otherwise, we preserve
3412 the stepping flag; this is used for stepping over gdbserver-placed
3413 breakpoints.
3414
3415 If pending_flags was set in any thread, we queue any needed
3416 signals, since we won't actually resume. We already have a pending
3417 event to report, so we don't need to preserve any step requests;
3418 they should be re-issued if necessary. */
3419
3420 static int
3421 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3422 {
3423 struct lwp_info *lwp;
3424 struct thread_info *thread;
3425 int step;
3426 int leave_all_stopped = * (int *) arg;
3427 int leave_pending;
3428
3429 thread = (struct thread_info *) entry;
3430 lwp = get_thread_lwp (thread);
3431
3432 if (lwp->resume == NULL)
3433 return 0;
3434
3435 if (lwp->resume->kind == resume_stop)
3436 {
3437 if (debug_threads)
3438 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3439
3440 if (!lwp->stopped)
3441 {
3442 if (debug_threads)
3443 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3444
3445 /* Stop the thread, and wait for the event asynchronously,
3446 through the event loop. */
3447 send_sigstop (lwp);
3448 }
3449 else
3450 {
3451 if (debug_threads)
3452 fprintf (stderr, "already stopped LWP %ld\n",
3453 lwpid_of (lwp));
3454
3455 /* The LWP may have been stopped in an internal event that
3456 was not meant to be notified back to GDB (e.g., gdbserver
3457 breakpoint), so we should be reporting a stop event in
3458 this case too. */
3459
3460 /* If the thread already has a pending SIGSTOP, this is a
3461 no-op. Otherwise, something later will presumably resume
3462 the thread and this will cause it to cancel any pending
3463 operation, due to last_resume_kind == resume_stop. If
3464 the thread already has a pending status to report, we
3465 will still report it the next time we wait - see
3466 status_pending_p_callback. */
3467
3468 /* If we already have a pending signal to report, then
3469 there's no need to queue a SIGSTOP, as this means we're
3470 midway through moving the LWP out of the jumppad, and we
3471 will report the pending signal as soon as that is
3472 finished. */
3473 if (lwp->pending_signals_to_report == NULL)
3474 send_sigstop (lwp);
3475 }
3476
3477 /* For stop requests, we're done. */
3478 lwp->resume = NULL;
3479 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3480 return 0;
3481 }
3482
3483 /* If this thread which is about to be resumed has a pending status,
3484 then don't resume any threads - we can just report the pending
3485 status. Make sure to queue any signals that would otherwise be
3486 sent. In all-stop mode, we make this decision based on whether *any*
3487 thread has a pending status. If there's a thread that needs the
3488 step-over-breakpoint dance, then don't resume any other thread
3489 but that particular one. */
3490 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3491
3492 if (!leave_pending)
3493 {
3494 if (debug_threads)
3495 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3496
3497 step = (lwp->resume->kind == resume_step);
3498 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3499 }
3500 else
3501 {
3502 if (debug_threads)
3503 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3504
3505 /* If we have a new signal, enqueue the signal. */
3506 if (lwp->resume->sig != 0)
3507 {
3508 struct pending_signals *p_sig;
3509 p_sig = xmalloc (sizeof (*p_sig));
3510 p_sig->prev = lwp->pending_signals;
3511 p_sig->signal = lwp->resume->sig;
3512 memset (&p_sig->info, 0, sizeof (siginfo_t));
3513
3514 /* If this is the same signal we were previously stopped by,
3515 make sure to queue its siginfo. We can ignore the return
3516 value of ptrace; if it fails, we'll skip
3517 PTRACE_SETSIGINFO. */
3518 if (WIFSTOPPED (lwp->last_status)
3519 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3520 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3521
3522 lwp->pending_signals = p_sig;
3523 }
3524 }
3525
3526 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3527 lwp->resume = NULL;
3528 return 0;
3529 }
3530
3531 static void
3532 linux_resume (struct thread_resume *resume_info, size_t n)
3533 {
3534 struct thread_resume_array array = { resume_info, n };
3535 struct lwp_info *need_step_over = NULL;
3536 int any_pending;
3537 int leave_all_stopped;
3538
3539 find_inferior (&all_threads, linux_set_resume_request, &array);
3540
3541 /* If there is a thread which would otherwise be resumed, which has
3542 a pending status, then don't resume any threads - we can just
3543 report the pending status. Make sure to queue any signals that
3544 would otherwise be sent. In non-stop mode, we'll apply this
3545 logic to each thread individually. We consume all pending events
3546 before considering starting a step-over (in all-stop). */
3547 any_pending = 0;
3548 if (!non_stop)
3549 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3550
3551 /* If there is a thread which would otherwise be resumed, which is
3552 stopped at a breakpoint that needs stepping over, then don't
3553 resume any threads - have it step over the breakpoint with all
3554 other threads stopped, then resume all threads again. Make sure
3555 to queue any signals that would otherwise be delivered or
3556 queued. */
3557 if (!any_pending && supports_breakpoints ())
3558 need_step_over
3559 = (struct lwp_info *) find_inferior (&all_lwps,
3560 need_step_over_p, NULL);
3561
3562 leave_all_stopped = (need_step_over != NULL || any_pending);
3563
3564 if (debug_threads)
3565 {
3566 if (need_step_over != NULL)
3567 fprintf (stderr, "Not resuming all, need step over\n");
3568 else if (any_pending)
3569 fprintf (stderr,
3570 "Not resuming, all-stop and found "
3571 "an LWP with pending status\n");
3572 else
3573 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3574 }
3575
3576 /* Even if we're leaving threads stopped, queue all signals we'd
3577 otherwise deliver. */
3578 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3579
3580 if (need_step_over)
3581 start_step_over (need_step_over);
3582 }
3583
3584 /* This function is called once per thread. We check the thread's
3585 last resume request, which will tell us whether to resume, step, or
3586 leave the thread stopped. Any signal the client requested to be
3587 delivered has already been enqueued at this point.
3588
3589 If any thread that GDB wants running is stopped at an internal
3590 breakpoint that needs stepping over, we start a step-over operation
3591 on that particular thread, and leave all others stopped. */
3592
3593 static int
3594 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3595 {
3596 struct lwp_info *lwp = (struct lwp_info *) entry;
3597 struct thread_info *thread;
3598 int step;
3599
3600 if (lwp == except)
3601 return 0;
3602
3603 if (debug_threads)
3604 fprintf (stderr,
3605 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3606
3607 if (!lwp->stopped)
3608 {
3609 if (debug_threads)
3610 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3611 return 0;
3612 }
3613
3614 thread = get_lwp_thread (lwp);
3615
3616 if (thread->last_resume_kind == resume_stop
3617 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3618 {
3619 if (debug_threads)
3620 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3621 lwpid_of (lwp));
3622 return 0;
3623 }
3624
3625 if (lwp->status_pending_p)
3626 {
3627 if (debug_threads)
3628 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3629 lwpid_of (lwp));
3630 return 0;
3631 }
3632
3633 gdb_assert (lwp->suspended >= 0);
3634
3635 if (lwp->suspended)
3636 {
3637 if (debug_threads)
3638 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3639 return 0;
3640 }
3641
3642 if (thread->last_resume_kind == resume_stop
3643 && lwp->pending_signals_to_report == NULL
3644 && lwp->collecting_fast_tracepoint == 0)
3645 {
3646 /* We haven't reported this LWP as stopped yet (otherwise, the
3647 last_status.kind check above would catch it, and we wouldn't
3648 reach here). This LWP may have been momentarily paused by a
3649 stop_all_lwps call while handling, for example, another LWP's
3650 step-over. In that case, the pending expected SIGSTOP signal
3651 that was queued at vCont;t handling time will have already
3652 been consumed by wait_for_sigstop, and so we need to requeue
3653 another one here. Note that if the LWP already has a SIGSTOP
3654 pending, this is a no-op. */
3655
3656 if (debug_threads)
3657 fprintf (stderr,
3658 "Client wants LWP %ld to stop. "
3659 "Making sure it has a SIGSTOP pending\n",
3660 lwpid_of (lwp));
3661
3662 send_sigstop (lwp);
3663 }
3664
3665 step = thread->last_resume_kind == resume_step;
3666 linux_resume_one_lwp (lwp, step, 0, NULL);
3667 return 0;
3668 }
3669
3670 static int
3671 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3672 {
3673 struct lwp_info *lwp = (struct lwp_info *) entry;
3674
3675 if (lwp == except)
3676 return 0;
3677
3678 lwp->suspended--;
3679 gdb_assert (lwp->suspended >= 0);
3680
3681 return proceed_one_lwp (entry, except);
3682 }
3683
3684 /* When we finish a step-over, set threads running again. If there's
3685 another thread that may need a step-over, now's the time to start
3686 it. Eventually, we'll move all threads past their breakpoints. */
3687
3688 static void
3689 proceed_all_lwps (void)
3690 {
3691 struct lwp_info *need_step_over;
3692
3693 /* If there is a thread which would otherwise be resumed, which is
3694 stopped at a breakpoint that needs stepping over, then don't
3695 resume any threads - have it step over the breakpoint with all
3696 other threads stopped, then resume all threads again. */
3697
3698 if (supports_breakpoints ())
3699 {
3700 need_step_over
3701 = (struct lwp_info *) find_inferior (&all_lwps,
3702 need_step_over_p, NULL);
3703
3704 if (need_step_over != NULL)
3705 {
3706 if (debug_threads)
3707 fprintf (stderr, "proceed_all_lwps: found "
3708 "thread %ld needing a step-over\n",
3709 lwpid_of (need_step_over));
3710
3711 start_step_over (need_step_over);
3712 return;
3713 }
3714 }
3715
3716 if (debug_threads)
3717 fprintf (stderr, "Proceeding, no step-over needed\n");
3718
3719 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3720 }
3721
3722 /* Stopped LWPs that the client wanted to be running, that don't have
3723 pending statuses, are set to run again, except for EXCEPT, if not
3724 NULL. This undoes a stop_all_lwps call. */
3725
3726 static void
3727 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3728 {
3729 if (debug_threads)
3730 {
3731 if (except)
3732 fprintf (stderr,
3733 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3734 else
3735 fprintf (stderr,
3736 "unstopping all lwps\n");
3737 }
3738
3739 if (unsuspend)
3740 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3741 else
3742 find_inferior (&all_lwps, proceed_one_lwp, except);
3743 }
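
/* For reference, a minimal usage sketch of the stop/unstop pairing
   above, analogous to what linux_prepare_to_access_memory does later
   in this file.  The helper name is invented for illustration, and
   the block is compiled out.  */
#if 0
static void
example_write_while_stopped (CORE_ADDR addr, const unsigned char *data,
                             int len)
{
  stop_all_lwps (0, NULL);      /* Stop everything, without suspending.  */
  linux_write_memory (addr, data, len);
  unstop_all_lwps (0, NULL);    /* Undo the stop_all_lwps call.  */
}
#endif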
3744
3745 #ifdef HAVE_LINUX_USRREGS
3746
3747 int
3748 register_addr (int regnum)
3749 {
3750 int addr;
3751
3752 if (regnum < 0 || regnum >= the_low_target.num_regs)
3753 error ("Invalid register number %d.", regnum);
3754
3755 addr = the_low_target.regmap[regnum];
3756
3757 return addr;
3758 }
3759
3760 /* Fetch one register. */
3761 static void
3762 fetch_register (struct regcache *regcache, int regno)
3763 {
3764 CORE_ADDR regaddr;
3765 int i, size;
3766 char *buf;
3767 int pid;
3768
3769 if (regno >= the_low_target.num_regs)
3770 return;
3771 if ((*the_low_target.cannot_fetch_register) (regno))
3772 return;
3773
3774 regaddr = register_addr (regno);
3775 if (regaddr == -1)
3776 return;
3777
3778 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3779 & -sizeof (PTRACE_XFER_TYPE));
3780 buf = alloca (size);
3781
3782 pid = lwpid_of (get_thread_lwp (current_inferior));
3783 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3784 {
3785 errno = 0;
3786 *(PTRACE_XFER_TYPE *) (buf + i) =
3787 ptrace (PTRACE_PEEKUSER, pid,
3788 /* Coerce to a uintptr_t first to avoid potential gcc warning
3789 of coercing an 8 byte integer to a 4 byte pointer. */
3790 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
3791 regaddr += sizeof (PTRACE_XFER_TYPE);
3792 if (errno != 0)
3793 error ("reading register %d: %s", regno, strerror (errno));
3794 }
3795
3796 if (the_low_target.supply_ptrace_register)
3797 the_low_target.supply_ptrace_register (regcache, regno, buf);
3798 else
3799 supply_register (regcache, regno, buf);
3800 }
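
/* A standalone sketch of the PTRACE_PEEKUSER access used above: one
   word is read from the tracee's USER area per syscall.  REG_OFFSET
   is a hypothetical stand-in for the_low_target.regmap[] values; the
   block is compiled out.  */
#if 0
static long
peek_user_word (int pid, long reg_offset)
{
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKUSER, pid,
                 (PTRACE_ARG3_TYPE) (uintptr_t) reg_offset, 0);
  if (errno != 0)
    error ("PTRACE_PEEKUSER at %ld: %s", reg_offset, strerror (errno));
  return word;
}
#endif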
3801
3802 /* Store one register. */
3803 static void
3804 store_register (struct regcache *regcache, int regno)
3805 {
3806 CORE_ADDR regaddr;
3807 int i, size;
3808 char *buf;
3809 int pid;
3810
3811 if (regno >= the_low_target.num_regs)
3812 return;
3813 if ((*the_low_target.cannot_store_register) (regno))
3814 return;
3815
3816 regaddr = register_addr (regno);
3817 if (regaddr == -1)
3818 return;
3819
3820 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3821 & -sizeof (PTRACE_XFER_TYPE));
3822 buf = alloca (size);
3823 memset (buf, 0, size);
3824
3825 if (the_low_target.collect_ptrace_register)
3826 the_low_target.collect_ptrace_register (regcache, regno, buf);
3827 else
3828 collect_register (regcache, regno, buf);
3829
3830 pid = lwpid_of (get_thread_lwp (current_inferior));
3831 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3832 {
3833 errno = 0;
3834 ptrace (PTRACE_POKEUSER, pid,
3835 /* Coerce to a uintptr_t first to avoid potential gcc warning
3836 about coercing an 8 byte integer to a 4 byte pointer. */
3837 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3838 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3839 if (errno != 0)
3840 {
3841 /* At this point, ESRCH should mean the process is
3842 already gone, in which case we simply ignore attempts
3843 to change its registers. See also the related
3844 comment in linux_resume_one_lwp. */
3845 if (errno == ESRCH)
3846 return;
3847
3848 if ((*the_low_target.cannot_store_register) (regno) == 0)
3849 error ("writing register %d: %s", regno, strerror (errno));
3850 }
3851 regaddr += sizeof (PTRACE_XFER_TYPE);
3852 }
3853 }
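
/* The symmetric PTRACE_POKEUSER write as a compiled-out sketch
   (REG_OFFSET hypothetical, as in the fetch sketch above).  ESRCH is
   ignored for the same reason as in store_register.  */
#if 0
static void
poke_user_word (int pid, long reg_offset, long word)
{
  errno = 0;
  ptrace (PTRACE_POKEUSER, pid,
          (PTRACE_ARG3_TYPE) (uintptr_t) reg_offset,
          (PTRACE_ARG4_TYPE) word);
  if (errno != 0 && errno != ESRCH)
    error ("PTRACE_POKEUSER at %ld: %s", reg_offset, strerror (errno));
}
#endif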
3854
3855 /* Fetch all registers, or just one, from the child process. */
3856 static void
3857 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3858 {
3859 if (regno == -1)
3860 for (regno = 0; regno < the_low_target.num_regs; regno++)
3861 fetch_register (regcache, regno);
3862 else
3863 fetch_register (regcache, regno);
3864 }
3865
3866 /* Store our register values back into the inferior.
3867 If REGNO is -1, do this for all registers.
3868 Otherwise, REGNO specifies which register (so we can save time). */
3869 static void
3870 usr_store_inferior_registers (struct regcache *regcache, int regno)
3871 {
3872 if (regno == -1)
3873 for (regno = 0; regno < the_low_target.num_regs; regno++)
3874 store_register (regcache, regno);
3875 else
3876 store_register (regcache, regno);
3877 }
3878 #endif /* HAVE_LINUX_USRREGS */
3879
3880
3881
3882 #ifdef HAVE_LINUX_REGSETS
3883
3884 static int
3885 regsets_fetch_inferior_registers (struct regcache *regcache)
3886 {
3887 struct regset_info *regset;
3888 int saw_general_regs = 0;
3889 int pid;
3890 struct iovec iov;
3891
3892 regset = target_regsets;
3893
3894 pid = lwpid_of (get_thread_lwp (current_inferior));
3895 while (regset->size >= 0)
3896 {
3897 void *buf, *data;
3898 int nt_type, res;
3899
3900 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3901 {
3902 regset ++;
3903 continue;
3904 }
3905
3906 buf = xmalloc (regset->size);
3907
3908 nt_type = regset->nt_type;
3909 if (nt_type)
3910 {
3911 iov.iov_base = buf;
3912 iov.iov_len = regset->size;
3913 data = (void *) &iov;
3914 }
3915 else
3916 data = buf;
3917
3918 #ifndef __sparc__
3919 res = ptrace (regset->get_request, pid, nt_type, data);
3920 #else
3921 res = ptrace (regset->get_request, pid, data, nt_type);
3922 #endif
3923 if (res < 0)
3924 {
3925 if (errno == EIO)
3926 {
3927 /* If we get EIO on a regset, do not try it again for
3928 this process. */
3929 disabled_regsets[regset - target_regsets] = 1;
3930 free (buf);
3931 continue;
3932 }
3933 else
3934 {
3935 char s[256];
3936 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3937 pid);
3938 perror (s);
3939 }
3940 }
3941 else if (regset->type == GENERAL_REGS)
3942 saw_general_regs = 1;
3943 regset->store_function (regcache, buf);
3944 regset ++;
3945 free (buf);
3946 }
3947 if (saw_general_regs)
3948 return 0;
3949 else
3950 return 1;
3951 }
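
/* A minimal sketch of the iovec-based fetch used above, with
   NT_PRSTATUS as the example note type.  It assumes the kernel and
   libc expose PTRACE_GETREGSET and elf_gregset_t; compiled out.  */
#if 0
static int
fetch_gregs_via_regset (int pid, elf_gregset_t *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* On success the kernel truncates iov.iov_len to what it wrote.  */
  return ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
}
#endif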
3952
3953 static int
3954 regsets_store_inferior_registers (struct regcache *regcache)
3955 {
3956 struct regset_info *regset;
3957 int saw_general_regs = 0;
3958 int pid;
3959 struct iovec iov;
3960
3961 regset = target_regsets;
3962
3963 pid = lwpid_of (get_thread_lwp (current_inferior));
3964 while (regset->size >= 0)
3965 {
3966 void *buf, *data;
3967 int nt_type, res;
3968
3969 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3970 {
3971 regset ++;
3972 continue;
3973 }
3974
3975 buf = xmalloc (regset->size);
3976
3977 /* First fill the buffer with the current register set contents,
3978 in case there are any items in the kernel's regset that are
3979 not in gdbserver's regcache. */
3980
3981 nt_type = regset->nt_type;
3982 if (nt_type)
3983 {
3984 iov.iov_base = buf;
3985 iov.iov_len = regset->size;
3986 data = (void *) &iov;
3987 }
3988 else
3989 data = buf;
3990
3991 #ifndef __sparc__
3992 res = ptrace (regset->get_request, pid, nt_type, data);
3993 #else
3994 res = ptrace (regset->get_request, pid, data, nt_type);
3995 #endif
3996
3997 if (res == 0)
3998 {
3999 /* Then overlay our cached registers on that. */
4000 regset->fill_function (regcache, buf);
4001
4002 /* Only now do we write the register set. */
4003 #ifndef __sparc__
4004 res = ptrace (regset->set_request, pid, nt_type, data);
4005 #else
4006 res = ptrace (regset->set_request, pid, data, nt_type);
4007 #endif
4008 }
4009
4010 if (res < 0)
4011 {
4012 if (errno == EIO)
4013 {
4014 /* If we get EIO on a regset, do not try it again for
4015 this process. */
4016 disabled_regsets[regset - target_regsets] = 1;
4017 free (buf);
4018 continue;
4019 }
4020 else if (errno == ESRCH)
4021 {
4022 /* At this point, ESRCH should mean the process is
4023 already gone, in which case we simply ignore attempts
4024 to change its registers. See also the related
4025 comment in linux_resume_one_lwp. */
4026 free (buf);
4027 return 0;
4028 }
4029 else
4030 {
4031 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4032 }
4033 }
4034 else if (regset->type == GENERAL_REGS)
4035 saw_general_regs = 1;
4036 regset ++;
4037 free (buf);
4038 }
4039 if (saw_general_regs)
4040 return 0;
4041 else
4042 return 1;
4044 }
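
/* The store path above is deliberately read-modify-write.  A sketch
   of the same pattern in isolation, again assuming PTRACE_GETREGSET
   and PTRACE_SETREGSET are available; OVERLAY plays the role of the
   regset's fill_function.  Compiled out.  */
#if 0
static int
store_gregs_via_regset (int pid, void (*overlay) (elf_gregset_t *))
{
  elf_gregset_t regs;
  struct iovec iov;

  iov.iov_base = &regs;
  iov.iov_len = sizeof (regs);

  /* Read the current contents first, so fields the caller does not
     track keep their kernel values.  */
  if (ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) != 0)
    return -1;

  overlay (&regs);      /* Merge in the cached registers.  */

  /* Only now write the whole set back.  */
  return ptrace (PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
}
#endif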
4045
4046 #endif /* HAVE_LINUX_REGSETS */
4047
4048
4049 void
4050 linux_fetch_registers (struct regcache *regcache, int regno)
4051 {
4052 #ifdef HAVE_LINUX_REGSETS
4053 if (regsets_fetch_inferior_registers (regcache) == 0)
4054 return;
4055 #endif
4056 #ifdef HAVE_LINUX_USRREGS
4057 usr_fetch_inferior_registers (regcache, regno);
4058 #endif
4059 }
4060
4061 void
4062 linux_store_registers (struct regcache *regcache, int regno)
4063 {
4064 #ifdef HAVE_LINUX_REGSETS
4065 if (regsets_store_inferior_registers (regcache) == 0)
4066 return;
4067 #endif
4068 #ifdef HAVE_LINUX_USRREGS
4069 usr_store_inferior_registers (regcache, regno);
4070 #endif
4071 }
4072
4073
4074 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4075 to debugger memory starting at MYADDR. */
4076
4077 static int
4078 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4079 {
4080 register int i;
4081 /* Round starting address down to longword boundary. */
4082 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4083 /* Round ending address up; get number of longwords that makes. */
4084 register int count
4085 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4086 / sizeof (PTRACE_XFER_TYPE);
4087 /* Allocate buffer of that many longwords. */
4088 register PTRACE_XFER_TYPE *buffer
4089 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4090 int fd;
4091 char filename[64];
4092 int pid = lwpid_of (get_thread_lwp (current_inferior));
4093
4094 /* Try using /proc.  Don't bother for short reads; the open/read/close overhead outweighs ptrace there. */
4095 if (len >= 3 * sizeof (long))
4096 {
4097 /* We could keep this file open and cache it - possibly one per
4098 thread. That requires some juggling, but is even faster. */
4099 sprintf (filename, "/proc/%d/mem", pid);
4100 fd = open (filename, O_RDONLY | O_LARGEFILE);
4101 if (fd == -1)
4102 goto no_proc;
4103
4104 /* If pread64 is available, use it. It's faster if the kernel
4105 supports it (only one syscall), and it's 64-bit safe even on
4106 32-bit platforms (for instance, SPARC debugging a SPARC64
4107 application). */
4108 #ifdef HAVE_PREAD64
4109 if (pread64 (fd, myaddr, len, memaddr) != len)
4110 #else
4111 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4112 #endif
4113 {
4114 close (fd);
4115 goto no_proc;
4116 }
4117
4118 close (fd);
4119 return 0;
4120 }
4121
4122 no_proc:
4123 /* Read all the longwords */
4124 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4125 {
4126 errno = 0;
4127 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4128 about coercing an 8 byte integer to a 4 byte pointer. */
4129 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4130 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4131 if (errno)
4132 return errno;
4133 }
4134
4135 /* Copy appropriate bytes out of the buffer. */
4136 memcpy (myaddr,
4137 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4138 len);
4139
4140 return 0;
4141 }
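
/* Worked example of the rounding in linux_read_memory, assuming an
   8-byte PTRACE_XFER_TYPE: for memaddr = 0x1003 and len = 6, addr
   rounds down to 0x1000 and count = ((0x1009 - 0x1000) + 7) / 8 = 2,
   i.e. two aligned words covering bytes 0x1000..0x100f, from which
   the six requested bytes are then memcpy'd out.  */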
4142
4143 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4144 memory at MEMADDR. On failure (cannot write to the inferior)
4145 returns the value of errno. */
4146
4147 static int
4148 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4149 {
4150 register int i;
4151 /* Round starting address down to longword boundary. */
4152 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4153 /* Round ending address up; get number of longwords that makes. */
4154 register int count
4155 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4156 / sizeof (PTRACE_XFER_TYPE);
4157
4158 /* Allocate buffer of that many longwords. */
4159 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4160 alloca (count * sizeof (PTRACE_XFER_TYPE));
4161
4162 int pid = lwpid_of (get_thread_lwp (current_inferior));
4163
4164 if (debug_threads)
4165 {
4166 /* Dump up to four bytes. */
4167 unsigned int val = * (unsigned int *) myaddr;
4168 if (len == 1)
4169 val = val & 0xff;
4170 else if (len == 2)
4171 val = val & 0xffff;
4172 else if (len == 3)
4173 val = val & 0xffffff;
4174 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4175 val, (long)memaddr);
4176 }
4177
4178 /* Fill start and end extra bytes of buffer with existing memory data. */
4179
4180 errno = 0;
4181 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4182 about coercing an 8 byte integer to a 4 byte pointer. */
4183 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4184 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4185 if (errno)
4186 return errno;
4187
4188 if (count > 1)
4189 {
4190 errno = 0;
4191 buffer[count - 1]
4192 = ptrace (PTRACE_PEEKTEXT, pid,
4193 /* Coerce to a uintptr_t first to avoid potential gcc warning
4194 about coercing an 8 byte integer to a 4 byte pointer. */
4195 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4196 * sizeof (PTRACE_XFER_TYPE)),
4197 0);
4198 if (errno)
4199 return errno;
4200 }
4201
4202 /* Copy data to be written over corresponding part of buffer. */
4203
4204 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4205 myaddr, len);
4206
4207 /* Write the entire buffer. */
4208
4209 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4210 {
4211 errno = 0;
4212 ptrace (PTRACE_POKETEXT, pid,
4213 /* Coerce to a uintptr_t first to avoid potential gcc warning
4214 about coercing an 8 byte integer to a 4 byte pointer. */
4215 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4216 (PTRACE_ARG4_TYPE) buffer[i]);
4217 if (errno)
4218 return errno;
4219 }
4220
4221 return 0;
4222 }
4223
4224 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4225 static int linux_supports_tracefork_flag;
4226
4227 static void
4228 linux_enable_event_reporting (int pid)
4229 {
4230 if (!linux_supports_tracefork_flag)
4231 return;
4232
4233 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4234 }
4235
4236 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4237
4238 static int
4239 linux_tracefork_grandchild (void *arg)
4240 {
4241 _exit (0);
4242 }
4243
4244 #define STACK_SIZE 4096
4245
4246 static int
4247 linux_tracefork_child (void *arg)
4248 {
4249 ptrace (PTRACE_TRACEME, 0, 0, 0);
4250 kill (getpid (), SIGSTOP);
4251
4252 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4253
4254 if (fork () == 0)
4255 linux_tracefork_grandchild (NULL);
4256
4257 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4258
4259 #ifdef __ia64__
4260 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4261 CLONE_VM | SIGCHLD, NULL);
4262 #else
4263 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4264 CLONE_VM | SIGCHLD, NULL);
4265 #endif
4266
4267 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4268
4269 _exit (0);
4270 }
4271
4272 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4273 sure that we can enable the option, and that it had the desired
4274 effect. */
4275
4276 static void
4277 linux_test_for_tracefork (void)
4278 {
4279 int child_pid, ret, status;
4280 long second_pid;
4281 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4282 char *stack = xmalloc (STACK_SIZE * 4);
4283 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4284
4285 linux_supports_tracefork_flag = 0;
4286
4287 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4288
4289 child_pid = fork ();
4290 if (child_pid == 0)
4291 linux_tracefork_child (NULL);
4292
4293 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4294
4295 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4296 #ifdef __ia64__
4297 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4298 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4299 #else /* !__ia64__ */
4300 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4301 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4302 #endif /* !__ia64__ */
4303
4304 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4305
4306 if (child_pid == -1)
4307 perror_with_name ("clone");
4308
4309 ret = my_waitpid (child_pid, &status, 0);
4310 if (ret == -1)
4311 perror_with_name ("waitpid");
4312 else if (ret != child_pid)
4313 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4314 if (! WIFSTOPPED (status))
4315 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4316
4317 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4318 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4319 if (ret != 0)
4320 {
4321 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4322 if (ret != 0)
4323 {
4324 warning ("linux_test_for_tracefork: failed to kill child");
4325 return;
4326 }
4327
4328 ret = my_waitpid (child_pid, &status, 0);
4329 if (ret != child_pid)
4330 warning ("linux_test_for_tracefork: failed to wait for killed child");
4331 else if (!WIFSIGNALED (status))
4332 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4333 "killed child", status);
4334
4335 return;
4336 }
4337
4338 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4339 if (ret != 0)
4340 warning ("linux_test_for_tracefork: failed to resume child");
4341
4342 ret = my_waitpid (child_pid, &status, 0);
4343
4344 if (ret == child_pid && WIFSTOPPED (status)
4345 && status >> 16 == PTRACE_EVENT_FORK)
4346 {
4347 second_pid = 0;
4348 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4349 if (ret == 0 && second_pid != 0)
4350 {
4351 int second_status;
4352
4353 linux_supports_tracefork_flag = 1;
4354 my_waitpid (second_pid, &second_status, 0);
4355 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4356 if (ret != 0)
4357 warning ("linux_test_for_tracefork: failed to kill second child");
4358 my_waitpid (second_pid, &status, 0);
4359 }
4360 }
4361 else
4362 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4363 "(%d, status 0x%x)", ret, status);
4364
4365 do
4366 {
4367 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4368 if (ret != 0)
4369 warning ("linux_test_for_tracefork: failed to kill child");
4370 my_waitpid (child_pid, &status, 0);
4371 }
4372 while (WIFSTOPPED (status));
4373
4374 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4375 free (stack);
4376 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4377 }
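
/* Condensed sketch of what the probe above looks for: a ptrace event
   is reported as a SIGTRAP stop with the event number in bits 16..23
   of the waitpid status.  Compiled out; uses only <sys/wait.h>
   macros.  */
#if 0
static int
is_fork_event (int status)
{
  return (WIFSTOPPED (status)
          && WSTOPSIG (status) == SIGTRAP
          && (status >> 16) == PTRACE_EVENT_FORK);
}
#endif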
4378
4379
4380 static void
4381 linux_look_up_symbols (void)
4382 {
4383 #ifdef USE_THREAD_DB
4384 struct process_info *proc = current_process ();
4385
4386 if (proc->private->thread_db != NULL)
4387 return;
4388
4389 /* If the kernel supports tracing forks then it also supports tracing
4390 clones, and then we don't need to use the magic thread event breakpoint
4391 to learn about threads. */
4392 thread_db_init (!linux_supports_tracefork_flag);
4393 #endif
4394 }
4395
4396 static void
4397 linux_request_interrupt (void)
4398 {
4399 extern unsigned long signal_pid;
4400
4401 if (!ptid_equal (cont_thread, null_ptid)
4402 && !ptid_equal (cont_thread, minus_one_ptid))
4403 {
4404 struct lwp_info *lwp;
4405 int lwpid;
4406
4407 lwp = get_thread_lwp (current_inferior);
4408 lwpid = lwpid_of (lwp);
4409 kill_lwp (lwpid, SIGINT);
4410 }
4411 else
4412 kill_lwp (signal_pid, SIGINT);
4413 }
4414
4415 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4416 to debugger memory starting at MYADDR. */
4417
4418 static int
4419 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4420 {
4421 char filename[PATH_MAX];
4422 int fd, n;
4423 int pid = lwpid_of (get_thread_lwp (current_inferior));
4424
4425 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4426
4427 fd = open (filename, O_RDONLY);
4428 if (fd < 0)
4429 return -1;
4430
4431 if (offset != (CORE_ADDR) 0
4432 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4433 n = -1;
4434 else
4435 n = read (fd, myaddr, len);
4436
4437 close (fd);
4438
4439 return n;
4440 }
4441
4442 /* These breakpoint and watchpoint related wrapper functions simply
4443 pass on the function call if the target has registered a
4444 corresponding function. */
4445
4446 static int
4447 linux_insert_point (char type, CORE_ADDR addr, int len)
4448 {
4449 if (the_low_target.insert_point != NULL)
4450 return the_low_target.insert_point (type, addr, len);
4451 else
4452 /* Unsupported (see target.h). */
4453 return 1;
4454 }
4455
4456 static int
4457 linux_remove_point (char type, CORE_ADDR addr, int len)
4458 {
4459 if (the_low_target.remove_point != NULL)
4460 return the_low_target.remove_point (type, addr, len);
4461 else
4462 /* Unsupported (see target.h). */
4463 return 1;
4464 }
4465
4466 static int
4467 linux_stopped_by_watchpoint (void)
4468 {
4469 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4470
4471 return lwp->stopped_by_watchpoint;
4472 }
4473
4474 static CORE_ADDR
4475 linux_stopped_data_address (void)
4476 {
4477 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4478
4479 return lwp->stopped_data_address;
4480 }
4481
4482 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4483 #if defined(__mcoldfire__)
4484 /* These should really be defined in the kernel's ptrace.h header. */
4485 #define PT_TEXT_ADDR 49*4
4486 #define PT_DATA_ADDR 50*4
4487 #define PT_TEXT_END_ADDR 51*4
4488 #elif defined(BFIN)
4489 #define PT_TEXT_ADDR 220
4490 #define PT_TEXT_END_ADDR 224
4491 #define PT_DATA_ADDR 228
4492 #elif defined(__TMS320C6X__)
4493 #define PT_TEXT_ADDR (0x10000*4)
4494 #define PT_DATA_ADDR (0x10004*4)
4495 #define PT_TEXT_END_ADDR (0x10008*4)
4496 #endif
4497
4498 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4499 to tell gdb about. */
4500
4501 static int
4502 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4503 {
4504 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4505 unsigned long text, text_end, data;
4506 int pid = lwpid_of (get_thread_lwp (current_inferior));
4507
4508 errno = 0;
4509
4510 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4511 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4512 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4513
4514 if (errno == 0)
4515 {
4516 /* Both text and data offsets produced at compile-time (and so
4517 used by gdb) are relative to the beginning of the program,
4518 with the data segment immediately following the text segment.
4519 However, the actual runtime layout in memory may put the data
4520 somewhere else, so when we send gdb a data base-address, we
4521 use the real data base address and subtract the compile-time
4522 data base-address from it (which is just the length of the
4523 text segment). BSS immediately follows data in both
4524 cases. */
4525 *text_p = text;
4526 *data_p = data - (text_end - text);
4527
4528 return 1;
4529 }
4530 #endif
4531 return 0;
4532 }
4533 #endif
4534
4535 static int
4536 linux_qxfer_osdata (const char *annex,
4537 unsigned char *readbuf, unsigned const char *writebuf,
4538 CORE_ADDR offset, int len)
4539 {
4540 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4541 }
4542
4543 /* Convert a native/host siginfo object, into/from the siginfo in the
4544 layout of the inferiors' architecture. */
4545
4546 static void
4547 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4548 {
4549 int done = 0;
4550
4551 if (the_low_target.siginfo_fixup != NULL)
4552 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4553
4554 /* If there was no callback, or the callback didn't do anything,
4555 then just do a straight memcpy. */
4556 if (!done)
4557 {
4558 if (direction == 1)
4559 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4560 else
4561 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4562 }
4563 }
4564
4565 static int
4566 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4567 unsigned const char *writebuf, CORE_ADDR offset, int len)
4568 {
4569 int pid;
4570 struct siginfo siginfo;
4571 char inf_siginfo[sizeof (struct siginfo)];
4572
4573 if (current_inferior == NULL)
4574 return -1;
4575
4576 pid = lwpid_of (get_thread_lwp (current_inferior));
4577
4578 if (debug_threads)
4579 fprintf (stderr, "%s siginfo for lwp %d.\n",
4580 readbuf != NULL ? "Reading" : "Writing",
4581 pid);
4582
4583 if (offset >= sizeof (siginfo))
4584 return -1;
4585
4586 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4587 return -1;
4588
4589 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4590 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4591 inferior with a 64-bit GDBSERVER should look the same as debugging it
4592 with a 32-bit GDBSERVER, we need to convert it. */
4593 siginfo_fixup (&siginfo, inf_siginfo, 0);
4594
4595 if (offset + len > sizeof (siginfo))
4596 len = sizeof (siginfo) - offset;
4597
4598 if (readbuf != NULL)
4599 memcpy (readbuf, inf_siginfo + offset, len);
4600 else
4601 {
4602 memcpy (inf_siginfo + offset, writebuf, len);
4603
4604 /* Convert back to ptrace layout before flushing it out. */
4605 siginfo_fixup (&siginfo, inf_siginfo, 1);
4606
4607 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4608 return -1;
4609 }
4610
4611 return len;
4612 }
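
/* The underlying ptrace round trip in isolation (a sketch; unlike
   the real code above it skips the 32/64-bit siginfo fixup).  The
   helper name is invented; compiled out.  */
#if 0
static int
rewrite_pending_signal (int pid, int new_signo)
{
  struct siginfo si;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &si) != 0)
    return -1;
  si.si_signo = new_signo;
  return ptrace (PTRACE_SETSIGINFO, pid, 0, &si);
}
#endif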
4613
4614 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4615 it lets us notice when children change state; and in all modes, it
4616 is the handler for the sigsuspend in my_waitpid. */
4617
4618 static void
4619 sigchld_handler (int signo)
4620 {
4621 int old_errno = errno;
4622
4623 if (debug_threads)
4624 {
4625 do
4626 {
4627 /* fprintf is not async-signal-safe, so call write
4628 directly. */
4629 if (write (2, "sigchld_handler\n",
4630 sizeof ("sigchld_handler\n") - 1) < 0)
4631 break; /* just ignore */
4632 } while (0);
4633 }
4634
4635 if (target_is_async_p ())
4636 async_file_mark (); /* trigger a linux_wait */
4637
4638 errno = old_errno;
4639 }
4640
4641 static int
4642 linux_supports_non_stop (void)
4643 {
4644 return 1;
4645 }
4646
4647 static int
4648 linux_async (int enable)
4649 {
4650 int previous = (linux_event_pipe[0] != -1);
4651
4652 if (debug_threads)
4653 fprintf (stderr, "linux_async (%d), previous=%d\n",
4654 enable, previous);
4655
4656 if (previous != enable)
4657 {
4658 sigset_t mask;
4659 sigemptyset (&mask);
4660 sigaddset (&mask, SIGCHLD);
4661
4662 sigprocmask (SIG_BLOCK, &mask, NULL);
4663
4664 if (enable)
4665 {
4666 if (pipe (linux_event_pipe) == -1)
4667 fatal ("creating event pipe failed.");
4668
4669 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4670 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4671
4672 /* Register the event loop handler. */
4673 add_file_handler (linux_event_pipe[0],
4674 handle_target_event, NULL);
4675
4676 /* Always trigger a linux_wait. */
4677 async_file_mark ();
4678 }
4679 else
4680 {
4681 delete_file_handler (linux_event_pipe[0]);
4682
4683 close (linux_event_pipe[0]);
4684 close (linux_event_pipe[1]);
4685 linux_event_pipe[0] = -1;
4686 linux_event_pipe[1] = -1;
4687 }
4688
4689 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4690 }
4691
4692 return previous;
4693 }
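
/* The pipe managed above is the classic self-pipe trick: the signal
   handler performs an async-signal-safe write, and the event loop
   watches the read end.  A compiled-out sketch with invented names:  */
#if 0
static int event_pipe[2];       /* Both ends set O_NONBLOCK.  */

static void
mark_event (void)               /* Safe to call from a signal handler.  */
{
  if (write (event_pipe[1], "+", 1) < 0)
    ;                           /* Pipe full: an event is already pending.  */
}

static void
drain_events (void)             /* Called from the event loop.  */
{
  char buf[16];

  while (read (event_pipe[0], buf, sizeof buf) > 0)
    ;
}
#endif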
4694
4695 static int
4696 linux_start_non_stop (int nonstop)
4697 {
4698 /* Register or unregister from event-loop accordingly. */
4699 linux_async (nonstop);
4700 return 0;
4701 }
4702
4703 static int
4704 linux_supports_multi_process (void)
4705 {
4706 return 1;
4707 }
4708
4709 static int
4710 linux_supports_disable_randomization (void)
4711 {
4712 #ifdef HAVE_PERSONALITY
4713 return 1;
4714 #else
4715 return 0;
4716 #endif
4717 }
4718
4719 /* Enumerate spufs IDs for process PID. */
4720 static int
4721 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4722 {
4723 int pos = 0;
4724 int written = 0;
4725 char path[128];
4726 DIR *dir;
4727 struct dirent *entry;
4728
4729 sprintf (path, "/proc/%ld/fd", pid);
4730 dir = opendir (path);
4731 if (!dir)
4732 return -1;
4733
4734 rewinddir (dir);
4735 while ((entry = readdir (dir)) != NULL)
4736 {
4737 struct stat st;
4738 struct statfs stfs;
4739 int fd;
4740
4741 fd = atoi (entry->d_name);
4742 if (!fd)
4743 continue;
4744
4745 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4746 if (stat (path, &st) != 0)
4747 continue;
4748 if (!S_ISDIR (st.st_mode))
4749 continue;
4750
4751 if (statfs (path, &stfs) != 0)
4752 continue;
4753 if (stfs.f_type != SPUFS_MAGIC)
4754 continue;
4755
4756 if (pos >= offset && pos + 4 <= offset + len)
4757 {
4758 *(unsigned int *)(buf + pos - offset) = fd;
4759 written += 4;
4760 }
4761 pos += 4;
4762 }
4763
4764 closedir (dir);
4765 return written;
4766 }
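
/* The spufs test above generalizes to any filesystem identified by a
   statfs magic number.  A compiled-out sketch:  */
#if 0
static int
path_has_fs_magic (const char *path, long magic)
{
  struct statfs stfs;

  return statfs (path, &stfs) == 0 && stfs.f_type == magic;
}
#endif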
4767
4768 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4769 object type, using the /proc file system. */
4770 static int
4771 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4772 unsigned const char *writebuf,
4773 CORE_ADDR offset, int len)
4774 {
4775 long pid = lwpid_of (get_thread_lwp (current_inferior));
4776 char buf[128];
4777 int fd = 0;
4778 int ret = 0;
4779
4780 if (!writebuf && !readbuf)
4781 return -1;
4782
4783 if (!*annex)
4784 {
4785 if (!readbuf)
4786 return -1;
4787 else
4788 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4789 }
4790
4791 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4792 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4793 if (fd <= 0)
4794 return -1;
4795
4796 if (offset != 0
4797 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4798 {
4799 close (fd);
4800 return 0;
4801 }
4802
4803 if (writebuf)
4804 ret = write (fd, writebuf, (size_t) len);
4805 else
4806 ret = read (fd, readbuf, (size_t) len);
4807
4808 close (fd);
4809 return ret;
4810 }
4811
4812 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4813 struct target_loadseg
4814 {
4815 /* Core address to which the segment is mapped. */
4816 Elf32_Addr addr;
4817 /* VMA recorded in the program header. */
4818 Elf32_Addr p_vaddr;
4819 /* Size of this segment in memory. */
4820 Elf32_Word p_memsz;
4821 };
4822
4823 # if defined PT_GETDSBT
4824 struct target_loadmap
4825 {
4826 /* Protocol version number, must be zero. */
4827 Elf32_Word version;
4828 /* Pointer to the DSBT table, its size, and the DSBT index. */
4829 unsigned *dsbt_table;
4830 unsigned dsbt_size, dsbt_index;
4831 /* Number of segments in this map. */
4832 Elf32_Word nsegs;
4833 /* The actual memory map. */
4834 struct target_loadseg segs[/*nsegs*/];
4835 };
4836 # define LINUX_LOADMAP PT_GETDSBT
4837 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4838 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4839 # else
4840 struct target_loadmap
4841 {
4842 /* Protocol version number, must be zero. */
4843 Elf32_Half version;
4844 /* Number of segments in this map. */
4845 Elf32_Half nsegs;
4846 /* The actual memory map. */
4847 struct target_loadseg segs[/*nsegs*/];
4848 };
4849 # define LINUX_LOADMAP PTRACE_GETFDPIC
4850 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4851 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4852 # endif
4853
4854 static int
4855 linux_read_loadmap (const char *annex, CORE_ADDR offset,
4856 unsigned char *myaddr, unsigned int len)
4857 {
4858 int pid = lwpid_of (get_thread_lwp (current_inferior));
4859 int addr = -1;
4860 struct target_loadmap *data = NULL;
4861 unsigned int actual_length, copy_length;
4862
4863 if (strcmp (annex, "exec") == 0)
4864 addr = (int) LINUX_LOADMAP_EXEC;
4865 else if (strcmp (annex, "interp") == 0)
4866 addr = (int) LINUX_LOADMAP_INTERP;
4867 else
4868 return -1;
4869
4870 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
4871 return -1;
4872
4873 if (data == NULL)
4874 return -1;
4875
4876 actual_length = sizeof (struct target_loadmap)
4877 + sizeof (struct target_loadseg) * data->nsegs;
4878
4879 if (offset < 0 || offset > actual_length)
4880 return -1;
4881
4882 copy_length = actual_length - offset < len ? actual_length - offset : len;
4883 memcpy (myaddr, (char *) data + offset, copy_length);
4884 return copy_length;
4885 }
4886 #else
4887 # define linux_read_loadmap NULL
4888 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
4889
4890 static void
4891 linux_process_qsupported (const char *query)
4892 {
4893 if (the_low_target.process_qsupported != NULL)
4894 the_low_target.process_qsupported (query);
4895 }
4896
4897 static int
4898 linux_supports_tracepoints (void)
4899 {
4900 if (*the_low_target.supports_tracepoints == NULL)
4901 return 0;
4902
4903 return (*the_low_target.supports_tracepoints) ();
4904 }
4905
4906 static CORE_ADDR
4907 linux_read_pc (struct regcache *regcache)
4908 {
4909 if (the_low_target.get_pc == NULL)
4910 return 0;
4911
4912 return (*the_low_target.get_pc) (regcache);
4913 }
4914
4915 static void
4916 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4917 {
4918 gdb_assert (the_low_target.set_pc != NULL);
4919
4920 (*the_low_target.set_pc) (regcache, pc);
4921 }
4922
4923 static int
4924 linux_thread_stopped (struct thread_info *thread)
4925 {
4926 return get_thread_lwp (thread)->stopped;
4927 }
4928
4929 /* This exposes stop-all-threads functionality to other modules. */
4930
4931 static void
4932 linux_pause_all (int freeze)
4933 {
4934 stop_all_lwps (freeze, NULL);
4935 }
4936
4937 /* This exposes unstop-all-threads functionality to other gdbserver
4938 modules. */
4939
4940 static void
4941 linux_unpause_all (int unfreeze)
4942 {
4943 unstop_all_lwps (unfreeze, NULL);
4944 }
4945
4946 static int
4947 linux_prepare_to_access_memory (void)
4948 {
4949 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4950 running LWP. */
4951 if (non_stop)
4952 linux_pause_all (1);
4953 return 0;
4954 }
4955
4956 static void
4957 linux_done_accessing_memory (void)
4958 {
4959 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4960 running LWP. */
4961 if (non_stop)
4962 linux_unpause_all (1);
4963 }
4964
4965 static int
4966 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
4967 CORE_ADDR collector,
4968 CORE_ADDR lockaddr,
4969 ULONGEST orig_size,
4970 CORE_ADDR *jump_entry,
4971 CORE_ADDR *trampoline,
4972 ULONGEST *trampoline_size,
4973 unsigned char *jjump_pad_insn,
4974 ULONGEST *jjump_pad_insn_size,
4975 CORE_ADDR *adjusted_insn_addr,
4976 CORE_ADDR *adjusted_insn_addr_end,
4977 char *err)
4978 {
4979 return (*the_low_target.install_fast_tracepoint_jump_pad)
4980 (tpoint, tpaddr, collector, lockaddr, orig_size,
4981 jump_entry, trampoline, trampoline_size,
4982 jjump_pad_insn, jjump_pad_insn_size,
4983 adjusted_insn_addr, adjusted_insn_addr_end,
4984 err);
4985 }
4986
4987 static struct emit_ops *
4988 linux_emit_ops (void)
4989 {
4990 if (the_low_target.emit_ops != NULL)
4991 return (*the_low_target.emit_ops) ();
4992 else
4993 return NULL;
4994 }
4995
4996 static int
4997 linux_get_min_fast_tracepoint_insn_len (void)
4998 {
4999 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5000 }
5001
5002 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5003
5004 static int
5005 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5006 CORE_ADDR *phdr_memaddr, int *num_phdr)
5007 {
5008 char filename[PATH_MAX];
5009 int fd;
5010 const int auxv_size = is_elf64
5011 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5012 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5013
5014 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5015
5016 fd = open (filename, O_RDONLY);
5017 if (fd < 0)
5018 return 1;
5019
5020 *phdr_memaddr = 0;
5021 *num_phdr = 0;
5022 while (read (fd, buf, auxv_size) == auxv_size
5023 && (*phdr_memaddr == 0 || *num_phdr == 0))
5024 {
5025 if (is_elf64)
5026 {
5027 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5028
5029 switch (aux->a_type)
5030 {
5031 case AT_PHDR:
5032 *phdr_memaddr = aux->a_un.a_val;
5033 break;
5034 case AT_PHNUM:
5035 *num_phdr = aux->a_un.a_val;
5036 break;
5037 }
5038 }
5039 else
5040 {
5041 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5042
5043 switch (aux->a_type)
5044 {
5045 case AT_PHDR:
5046 *phdr_memaddr = aux->a_un.a_val;
5047 break;
5048 case AT_PHNUM:
5049 *num_phdr = aux->a_un.a_val;
5050 break;
5051 }
5052 }
5053 }
5054
5055 close (fd);
5056
5057 if (*phdr_memaddr == 0 || *num_phdr == 0)
5058 {
5059 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5060 "phdr_memaddr = %ld, phdr_num = %d",
5061 (long) *phdr_memaddr, *num_phdr);
5062 return 2;
5063 }
5064
5065 return 0;
5066 }
5067
5068 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5069
5070 static CORE_ADDR
5071 get_dynamic (const int pid, const int is_elf64)
5072 {
5073 CORE_ADDR phdr_memaddr, relocation;
5074 int num_phdr, i;
5075 unsigned char *phdr_buf;
5076 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5077
5078 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5079 return 0;
5080
5081 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5082 phdr_buf = alloca (num_phdr * phdr_size);
5083
5084 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5085 return 0;
5086
5087 /* Compute relocation: it is expected to be 0 for "regular" executables,
5088 non-zero for PIE ones. */
5089 relocation = -1;
5090 for (i = 0; relocation == -1 && i < num_phdr; i++)
5091 if (is_elf64)
5092 {
5093 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5094
5095 if (p->p_type == PT_PHDR)
5096 relocation = phdr_memaddr - p->p_vaddr;
5097 }
5098 else
5099 {
5100 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5101
5102 if (p->p_type == PT_PHDR)
5103 relocation = phdr_memaddr - p->p_vaddr;
5104 }
5105
5106 if (relocation == -1)
5107 {
5108 warning ("Unexpected missing PT_PHDR");
5109 return 0;
5110 }
5111
5112 for (i = 0; i < num_phdr; i++)
5113 {
5114 if (is_elf64)
5115 {
5116 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5117
5118 if (p->p_type == PT_DYNAMIC)
5119 return p->p_vaddr + relocation;
5120 }
5121 else
5122 {
5123 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5124
5125 if (p->p_type == PT_DYNAMIC)
5126 return p->p_vaddr + relocation;
5127 }
5128 }
5129
5130 return 0;
5131 }
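
/* Worked example of the relocation computed above: for a PIE whose
   PT_PHDR records p_vaddr = 0x40 but whose program headers were
   actually mapped at 0x555555554040 (the AT_PHDR value), relocation
   = 0x555555554040 - 0x40 = 0x555555554000, which is then added to
   PT_DYNAMIC's p_vaddr to find &_DYNAMIC at run time.  */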
5132
5133 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5134 can be 0 if the inferior does not yet have the library list initialized. */
5135
5136 static CORE_ADDR
5137 get_r_debug (const int pid, const int is_elf64)
5138 {
5139 CORE_ADDR dynamic_memaddr;
5140 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5141 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5142
5143 dynamic_memaddr = get_dynamic (pid, is_elf64);
5144 if (dynamic_memaddr == 0)
5145 return (CORE_ADDR) -1;
5146
5147 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5148 {
5149 if (is_elf64)
5150 {
5151 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5152
5153 if (dyn->d_tag == DT_DEBUG)
5154 return dyn->d_un.d_val;
5155
5156 if (dyn->d_tag == DT_NULL)
5157 break;
5158 }
5159 else
5160 {
5161 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5162
5163 if (dyn->d_tag == DT_DEBUG)
5164 return dyn->d_un.d_val;
5165
5166 if (dyn->d_tag == DT_NULL)
5167 break;
5168 }
5169
5170 dynamic_memaddr += dyn_size;
5171 }
5172
5173 return (CORE_ADDR) -1;
5174 }
5175
5176 /* Read one pointer from MEMADDR in the inferior. */
5177
5178 static int
5179 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5180 {
5181 *ptr = 0;
5182 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5183 }
5184
5185 struct link_map_offsets
5186 {
5187 /* Offset and size of r_debug.r_version. */
5188 int r_version_offset;
5189
5190 /* Offset and size of r_debug.r_map. */
5191 int r_map_offset;
5192
5193 /* Offset to l_addr field in struct link_map. */
5194 int l_addr_offset;
5195
5196 /* Offset to l_name field in struct link_map. */
5197 int l_name_offset;
5198
5199 /* Offset to l_ld field in struct link_map. */
5200 int l_ld_offset;
5201
5202 /* Offset to l_next field in struct link_map. */
5203 int l_next_offset;
5204
5205 /* Offset to l_prev field in struct link_map. */
5206 int l_prev_offset;
5207 };
5208
5209 /* Construct qXfer:libraries:read reply. */
5210
5211 static int
5212 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5213 unsigned const char *writebuf,
5214 CORE_ADDR offset, int len)
5215 {
5216 char *document;
5217 unsigned document_len;
5218 struct process_info_private *const priv = current_process ()->private;
5219 char filename[PATH_MAX];
5220 int pid, is_elf64;
5221
5222 static const struct link_map_offsets lmo_32bit_offsets =
5223 {
5224 0, /* r_version offset. */
5225 4, /* r_debug.r_map offset. */
5226 0, /* l_addr offset in link_map. */
5227 4, /* l_name offset in link_map. */
5228 8, /* l_ld offset in link_map. */
5229 12, /* l_next offset in link_map. */
5230 16 /* l_prev offset in link_map. */
5231 };
5232
5233 static const struct link_map_offsets lmo_64bit_offsets =
5234 {
5235 0, /* r_version offset. */
5236 8, /* r_debug.r_map offset. */
5237 0, /* l_addr offset in link_map. */
5238 8, /* l_name offset in link_map. */
5239 16, /* l_ld offset in link_map. */
5240 24, /* l_next offset in link_map. */
5241 32 /* l_prev offset in link_map. */
5242 };
5243 const struct link_map_offsets *lmo;
5244
5245 if (writebuf != NULL)
5246 return -2;
5247 if (readbuf == NULL)
5248 return -1;
5249
5250 pid = lwpid_of (get_thread_lwp (current_inferior));
5251 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5252 is_elf64 = elf_64_file_p (filename);
5253 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5254
5255 if (priv->r_debug == 0)
5256 priv->r_debug = get_r_debug (pid, is_elf64);
5257
5258 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5259 {
5260 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5261 }
5262 else
5263 {
5264 int allocated = 1024;
5265 char *p;
5266 const int ptr_size = is_elf64 ? 8 : 4;
5267 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5268 int r_version, header_done = 0;
5269
5270 document = xmalloc (allocated);
5271 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5272 p = document + strlen (document);
5273
5274 r_version = 0;
5275 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5276 (unsigned char *) &r_version,
5277 sizeof (r_version)) != 0
5278 || r_version != 1)
5279 {
5280 warning ("unexpected r_debug version %d", r_version);
5281 goto done;
5282 }
5283
5284 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5285 &lm_addr, ptr_size) != 0)
5286 {
5287 warning ("unable to read r_map from 0x%lx",
5288 (long) priv->r_debug + lmo->r_map_offset);
5289 goto done;
5290 }
5291
5292 lm_prev = 0;
5293 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5294 &l_name, ptr_size) == 0
5295 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5296 &l_addr, ptr_size) == 0
5297 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5298 &l_ld, ptr_size) == 0
5299 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5300 &l_prev, ptr_size) == 0
5301 && read_one_ptr (lm_addr + lmo->l_next_offset,
5302 &l_next, ptr_size) == 0)
5303 {
5304 unsigned char libname[PATH_MAX];
5305
5306 if (lm_prev != l_prev)
5307 {
5308 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5309 (long) lm_prev, (long) l_prev);
5310 break;
5311 }
5312
5313 /* Not checking for error because reading may stop before
5314 we've got PATH_MAX worth of characters. */
5315 libname[0] = '\0';
5316 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5317 libname[sizeof (libname) - 1] = '\0';
5318 if (libname[0] != '\0')
5319 {
5320 /* 6x the size for xml_escape_text below. */
5321 size_t len = 6 * strlen ((char *) libname);
5322 char *name;
5323
5324 if (!header_done)
5325 {
5326 /* Terminate `<library-list-svr4'. */
5327 *p++ = '>';
5328 header_done = 1;
5329 }
5330
5331 while (allocated < p - document + len + 200)
5332 {
5333 /* Expand to guarantee sufficient storage. */
5334 uintptr_t document_len = p - document;
5335
5336 document = xrealloc (document, 2 * allocated);
5337 allocated *= 2;
5338 p = document + document_len;
5339 }
5340
5341 name = xml_escape_text ((char *) libname);
5342 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5343 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5344 name, (unsigned long) lm_addr,
5345 (unsigned long) l_addr, (unsigned long) l_ld);
5346 free (name);
5347 }
5348 else if (lm_prev == 0)
5349 {
5350 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5351 p = p + strlen (p);
5352 }
5353
5354 if (l_next == 0)
5355 break;
5356
5357 lm_prev = lm_addr;
5358 lm_addr = l_next;
5359 }
5360 done:
5361 strcpy (p, "</library-list-svr4>");
5362 }
5363
5364 document_len = strlen (document);
5365 if (offset < document_len)
5366 document_len -= offset;
5367 else
5368 document_len = 0;
5369 if (len > document_len)
5370 len = document_len;
5371
5372 memcpy (readbuf, document + offset, len);
5373 xfree (document);
5374
5375 return len;
5376 }
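
/* For reference, a reply built above has this shape (addresses
   invented for illustration):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib/libc.so.6" lm="0x7ffff7fc3000"
              l_addr="0x7ffff7a1c000" l_ld="0x7ffff7dbdb80"/>
   </library-list-svr4>  */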
5377
5378 static struct target_ops linux_target_ops = {
5379 linux_create_inferior,
5380 linux_attach,
5381 linux_kill,
5382 linux_detach,
5383 linux_mourn,
5384 linux_join,
5385 linux_thread_alive,
5386 linux_resume,
5387 linux_wait,
5388 linux_fetch_registers,
5389 linux_store_registers,
5390 linux_prepare_to_access_memory,
5391 linux_done_accessing_memory,
5392 linux_read_memory,
5393 linux_write_memory,
5394 linux_look_up_symbols,
5395 linux_request_interrupt,
5396 linux_read_auxv,
5397 linux_insert_point,
5398 linux_remove_point,
5399 linux_stopped_by_watchpoint,
5400 linux_stopped_data_address,
5401 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5402 linux_read_offsets,
5403 #else
5404 NULL,
5405 #endif
5406 #ifdef USE_THREAD_DB
5407 thread_db_get_tls_address,
5408 #else
5409 NULL,
5410 #endif
5411 linux_qxfer_spu,
5412 hostio_last_error_from_errno,
5413 linux_qxfer_osdata,
5414 linux_xfer_siginfo,
5415 linux_supports_non_stop,
5416 linux_async,
5417 linux_start_non_stop,
5418 linux_supports_multi_process,
5419 #ifdef USE_THREAD_DB
5420 thread_db_handle_monitor_command,
5421 #else
5422 NULL,
5423 #endif
5424 linux_common_core_of_thread,
5425 linux_read_loadmap,
5426 linux_process_qsupported,
5427 linux_supports_tracepoints,
5428 linux_read_pc,
5429 linux_write_pc,
5430 linux_thread_stopped,
5431 NULL,
5432 linux_pause_all,
5433 linux_unpause_all,
5434 linux_cancel_breakpoints,
5435 linux_stabilize_threads,
5436 linux_install_fast_tracepoint_jump_pad,
5437 linux_emit_ops,
5438 linux_supports_disable_randomization,
5439 linux_get_min_fast_tracepoint_insn_len,
5440 linux_qxfer_libraries_svr4,
5441 };
5442
5443 static void
5444 linux_init_signals (void)
5445 {
5446 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5447 to find what the cancel signal actually is. */
5448 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5449 signal (__SIGRTMIN+1, SIG_IGN);
5450 #endif
5451 }
5452
5453 void
5454 initialize_low (void)
5455 {
5456 struct sigaction sigchld_action;
5457 memset (&sigchld_action, 0, sizeof (sigchld_action));
5458 set_target_ops (&linux_target_ops);
5459 set_breakpoint_data (the_low_target.breakpoint,
5460 the_low_target.breakpoint_len);
5461 linux_init_signals ();
5462 linux_test_for_tracefork ();
5463 #ifdef HAVE_LINUX_REGSETS
5464 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5465 ;
5466 disabled_regsets = xmalloc (num_regsets);
5467 #endif
5468
5469 sigchld_action.sa_handler = sigchld_handler;
5470 sigemptyset (&sigchld_action.sa_mask);
5471 sigchld_action.sa_flags = SA_RESTART;
5472 sigaction (SIGCHLD, &sigchld_action, NULL);
5473 }