/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
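
/* For example, W_STOPCODE (SIGSTOP) == 0x137f on targets where
   SIGSTOP is 19; WIFSTOPPED and WSTOPSIG invert this encoding.  */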

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */
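/* For example: given a process with tgid 1234 whose second thread has
   LWP ID 1235, all_processes is keyed by 1234, while that thread sits
   in all_lwps and all_threads under the ptid (1234, 1235, 0) built by
   ptid_build below.  */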

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd; the caller must free it.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
	  && header->e_ident[EI_MAG1] == ELFMAG1
	  && header->e_ident[EI_MAG2] == ELFMAG2
	  && header->e_ident[EI_MAG3] == ELFMAG3
	  && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
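
/* A caller might, for example, combine this with
   linux_child_pid_to_exec_file above to decide whether an inferior is
   64-bit (a sketch only; remember the returned path is malloc'd):

     char *exe = linux_child_pid_to_exec_file (pid);
     int is_64 = elf_64_file_p (exe);
     free (exe);  */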

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
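
/* Usage sketch (illustrative only): a blocking wait for any child,
   traced or cloned, mirroring how linux_wait_for_lwp below calls this:

     int status;
     int pid = my_waitpid (-1, &status, __WALL);
     if (pid > 0 && WIFSTOPPED (status))
       ... handle a stop with signal WSTOPSIG (status) ...  */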

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
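  /* With PTRACE_O_TRACECLONE set, a clone is reported as a SIGTRAP
     stop whose extended event code sits in bits 16 and up of the wait
     status, so WSTOPSIG (wstat) == SIGTRAP and wstat >> 16
     == PTRACE_EVENT_CLONE.  */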
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
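
/* Concretely (an illustrative example): on i386 the memory breakpoint
   is the 1-byte int3 (0xcc), so decr_pc_after_break is 1.  After the
   trap, $eip points one byte past the breakpoint address, and
   get_stop_pc below subtracts that 1 to recover the breakpoint's
   address.  */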

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    /* Errors ignored.  */;
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  unsigned long lwp;
	  int new_threads_found;
	  int iterations = 0;
	  struct dirent *dp;

	  while (iterations < 2)
	    {
	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  /* Is this a new thread?  */
		  if (lwp
		      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
		    {
		      linux_attach_lwp_1 (lwp, 0);
		      new_threads_found++;

		      if (debug_threads)
			fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	fprintf (stderr, "lk_1: cannot find lwp for pid: %d!\n", pid);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
		 lwpid_of (lwp), pid);

      do
	{
	  ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

	  /* Make sure it died.  The loop is most likely unnecessary.  */
	  lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
	} while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* We resume in the caller because we want to be able to pass
   lwp->status_pending as WSTAT, and we need to clear status_pending_p
   before resuming; otherwise, linux_resume_one_lwp refuses to
   resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
			 lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	fprintf (stderr,
		 "   Already queued %d\n",
		 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Not requeuing already queued non-RT signal %d"
			 " for LWP %ld\n",
			 sig->signal,
			 lwpid_of (lwp));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */
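/* New entries are pushed at the head of the list by
   enqueue_one_deferred_signal, and this function unlinks the deepest
   (oldest) entry, so deferred signals are re-reported in FIFO
   order.  */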

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
		 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    fprintf (stderr,
		     "   Still queued %d\n",
		     sig->signal);

	  fprintf (stderr, "   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "WNOHANG set, no event found\n");
	  return 0;
	}

      if (event_child == NULL)
	error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    {
	      int lwpid = lwpid_of (event_child);

	      /* Cancel the step-over operation --- the thread that
		 started it is gone.  */
	      if (finish_step_over (event_child))
		unstop_all_lwps (1, event_child);
	      delete_lwp (event_child);
	      return lwpid;
	    }

	  delete_lwp (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      if (event_child->must_set_ptrace_flags)
	{
	  linux_enable_event_reporting (lwpid_of (event_child));
	  event_child->must_set_ptrace_flags = 0;
	}

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  int should_stop;

	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;

	  should_stop = (current_inferior->last_resume_kind == resume_stop
			 || stopping_threads);

	  if (!should_stop)
	    {
	      linux_resume_one_lwp (event_child,
				    event_child->stepping, 0, NULL);
	      continue;
	    }
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
	 with waitpid, so instead, we wait for any child, and leave
	 children we're not interested in right now with a pending
	 status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
	  && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
	{
	  struct lwp_info *event_child
	    = find_lwp_pid (pid_to_ptid (event_pid));

	  if (! WIFSTOPPED (*wstat))
	    mark_lwp_dead (event_child, *wstat);
	  else
	    {
	      event_child->status_pending_p = 1;
	      event_child->status_pending = *wstat;
	    }
	}
      else
	return event_pid;
    }
}


/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
					 select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
	fprintf (stderr,
		 "SEL: Select single-step %s\n",
		 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));
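      /* This maps rand ()'s range [0, RAND_MAX] uniformly onto
	 [0, num_events - 1]; e.g. with num_events == 3, each of the
	 selectors 0, 1 and 2 is (nearly) equally likely.  */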
1914
1915 if (debug_threads && num_events > 1)
1916 fprintf (stderr,
1917 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1918 num_events, random_selector);
1919
1920 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1921 select_event_lwp_callback,
1922 &random_selector);
1923 }
1924
1925 if (event_lp != NULL)
1926 {
1927 /* Switch the event LWP. */
1928 *orig_lp = event_lp;
1929 }
1930 }
1931
1932 /* Decrement the suspend count of an LWP. */
1933
1934 static int
1935 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1936 {
1937 struct lwp_info *lwp = (struct lwp_info *) entry;
1938
1939 /* Ignore EXCEPT. */
1940 if (lwp == except)
1941 return 0;
1942
1943 lwp->suspended--;
1944
1945 gdb_assert (lwp->suspended >= 0);
1946 return 0;
1947 }
1948
1949 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
1950 NULL. */
1951
1952 static void
1953 unsuspend_all_lwps (struct lwp_info *except)
1954 {
1955 find_inferior (&all_lwps, unsuspend_one_lwp, except);
1956 }
1957
1958 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1959 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1960 void *data);
1961 static int lwp_running (struct inferior_list_entry *entry, void *data);
1962 static ptid_t linux_wait_1 (ptid_t ptid,
1963 struct target_waitstatus *ourstatus,
1964 int target_options);
1965
1966 /* Stabilize threads (move out of jump pads).
1967
1968 If a thread is midway through collecting a fast tracepoint, we need to
1969 finish the collection and move it out of the jump pad before
1970 reporting the signal.
1971
1972 This avoids recursion while collecting (when a signal arrives
1973 midway, and the signal handler itself collects), which would trash
1974 the trace buffer. In case the user set a breakpoint in a signal
1975 handler, this avoids the backtrace showing the jump pad, etc..
1976 Most importantly, there are certain things we can't do safely if
1977 threads are stopped in a jump pad (or in its callees). For
1978 example:
1979
1980 - starting a new trace run. A thread still collecting the
1981 previous run could trash the trace buffer when resumed. The trace
1982 buffer control structures would have been reset but the thread would
1983 have no way to tell. The thread could even be midway through
1984 memcpy'ing to the buffer, which would mean that when resumed, it
1985 would clobber the trace buffer that had been set up for the new run.
1986
1987 - we can't rewrite/reuse the jump pads for new tracepoints
1988 safely. Say you do tstart while a thread is stopped midway
1989 through collecting. When the thread is later resumed, it finishes the
1990 collection, and returns to the jump pad, to execute the original
1991 instruction that was under the tracepoint jump at the time the
1992 older run had been started. If the jump pad had been rewritten
1993 since for something else in the new run, the thread would now
1994 execute the wrong / random instructions. */
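/* In outline, the function below: bails out if some LWP is stuck in
a jump pad and must report its stop to GDB; otherwise kicks every
LWP out of its jump pad, then pulls wait events until no LWP is
left running, deferring any interesting signals for later
delivery. */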
1995
1996 static void
1997 linux_stabilize_threads (void)
1998 {
1999 struct thread_info *save_inferior;
2000 struct lwp_info *lwp_stuck;
2001
2002 lwp_stuck
2003 = (struct lwp_info *) find_inferior (&all_lwps,
2004 stuck_in_jump_pad_callback, NULL);
2005 if (lwp_stuck != NULL)
2006 {
2007 if (debug_threads)
2008 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2009 lwpid_of (lwp_stuck));
2010 return;
2011 }
2012
2013 save_inferior = current_inferior;
2014
2015 stabilizing_threads = 1;
2016
2017 /* Kick 'em all. */
2018 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2019
2020 /* Loop until all are stopped out of the jump pads. */
2021 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2022 {
2023 struct target_waitstatus ourstatus;
2024 struct lwp_info *lwp;
2025 int wstat;
2026
2027 /* Note that we go through the full wait event loop. While
2028 moving threads out of jump pad, we need to be able to step
2029 over internal breakpoints and such. */
2030 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2031
2032 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2033 {
2034 lwp = get_thread_lwp (current_inferior);
2035
2036 /* Lock it. */
2037 lwp->suspended++;
2038
2039 if (ourstatus.value.sig != TARGET_SIGNAL_0
2040 || current_inferior->last_resume_kind == resume_stop)
2041 {
2042 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2043 enqueue_one_deferred_signal (lwp, &wstat);
2044 }
2045 }
2046 }
2047
2048 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2049
2050 stabilizing_threads = 0;
2051
2052 current_inferior = save_inferior;
2053
2054 if (debug_threads)
2055 {
2056 lwp_stuck
2057 = (struct lwp_info *) find_inferior (&all_lwps,
2058 stuck_in_jump_pad_callback, NULL);
2059 if (lwp_stuck != NULL)
2060 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2061 lwpid_of (lwp_stuck));
2062 }
2063 }
2064
2065 /* Wait for process, returns status. */
2066
2067 static ptid_t
2068 linux_wait_1 (ptid_t ptid,
2069 struct target_waitstatus *ourstatus, int target_options)
2070 {
2071 int w;
2072 struct lwp_info *event_child;
2073 int options;
2074 int pid;
2075 int step_over_finished;
2076 int bp_explains_trap;
2077 int maybe_internal_trap;
2078 int report_to_gdb;
2079 int trace_event;
2080
2081 /* Translate generic target options into linux options. */
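/* __WALL means: wait for events from all children, whether they
were created via clone or via fork. */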
2082 options = __WALL;
2083 if (target_options & TARGET_WNOHANG)
2084 options |= WNOHANG;
2085
2086 retry:
2087 bp_explains_trap = 0;
2088 trace_event = 0;
2089 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2090
2091 /* If we were only supposed to resume one thread, only wait for
2092 that thread - if it's still alive. If it died, however - which
2093 can happen if we're coming from the thread death case below -
2094 then we need to make sure we restart the other threads. We could
2095 pick a thread at random or restart all; restarting all is less
2096 arbitrary. */
2097 if (!non_stop
2098 && !ptid_equal (cont_thread, null_ptid)
2099 && !ptid_equal (cont_thread, minus_one_ptid))
2100 {
2101 struct thread_info *thread;
2102
2103 thread = (struct thread_info *) find_inferior_id (&all_threads,
2104 cont_thread);
2105
2106 /* No stepping, no signal - unless one is pending already, of course. */
2107 if (thread == NULL)
2108 {
2109 struct thread_resume resume_info;
2110 resume_info.thread = minus_one_ptid;
2111 resume_info.kind = resume_continue;
2112 resume_info.sig = 0;
2113 linux_resume (&resume_info, 1);
2114 }
2115 else
2116 ptid = cont_thread;
2117 }
2118
2119 if (ptid_equal (step_over_bkpt, null_ptid))
2120 pid = linux_wait_for_event (ptid, &w, options);
2121 else
2122 {
2123 if (debug_threads)
2124 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2125 target_pid_to_str (step_over_bkpt));
2126 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2127 }
2128
2129 if (pid == 0) /* only if TARGET_WNOHANG */
2130 return null_ptid;
2131
2132 event_child = get_thread_lwp (current_inferior);
2133
2134 /* If we are waiting for a particular child, and it exited,
2135 linux_wait_for_event will return its exit status. Similarly if
2136 the last child exited. If this is not the last child, however,
2137 do not report it as exited until there is a 'thread exited' response
2138 available in the remote protocol. Instead, just wait for another event.
2139 This should be safe, because if the thread crashed we will already
2140 have reported the termination signal to GDB; that should stop any
2141 in-progress stepping operations, etc.
2142
2143 Report the exit status of the last thread to exit. This matches
2144 LinuxThreads' behavior. */
2145
2146 if (last_thread_of_process_p (current_inferior))
2147 {
2148 if (WIFEXITED (w) || WIFSIGNALED (w))
2149 {
2150 if (WIFEXITED (w))
2151 {
2152 ourstatus->kind = TARGET_WAITKIND_EXITED;
2153 ourstatus->value.integer = WEXITSTATUS (w);
2154
2155 if (debug_threads)
2156 fprintf (stderr,
2157 "\nChild exited with retcode = %x \n",
2158 WEXITSTATUS (w));
2159 }
2160 else
2161 {
2162 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2163 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2164
2165 if (debug_threads)
2166 fprintf (stderr,
2167 "\nChild terminated with signal = %x \n",
2168 WTERMSIG (w));
2169
2170 }
2171
2172 return ptid_of (event_child);
2173 }
2174 }
2175 else
2176 {
2177 if (!WIFSTOPPED (w))
2178 goto retry;
2179 }
2180
2181 /* If this event was not handled before, and is not a SIGTRAP, we
2182 report it. SIGILL and SIGSEGV are also treated as traps in case
2183 a breakpoint is inserted at the current PC. If this target does
2184 not support internal breakpoints at all, we also report the
2185 SIGTRAP without further processing; it's of no concern to us. */
2186 maybe_internal_trap
2187 = (supports_breakpoints ()
2188 && (WSTOPSIG (w) == SIGTRAP
2189 || ((WSTOPSIG (w) == SIGILL
2190 || WSTOPSIG (w) == SIGSEGV)
2191 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2192
2193 if (maybe_internal_trap)
2194 {
2195 /* Handle anything that requires bookkeeping before deciding to
2196 report the event or continue waiting. */
2197
2198 /* First check if we can explain the SIGTRAP with an internal
2199 breakpoint, or if we should possibly report the event to GDB.
2200 Do this before anything that may remove or insert a
2201 breakpoint. */
2202 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2203
2204 /* We have a SIGTRAP, possibly a step-over dance has just
2205 finished. If so, tweak the state machine accordingly,
2206 reinsert breakpoints and delete any reinsert (software
2207 single-step) breakpoints. */
2208 step_over_finished = finish_step_over (event_child);
2209
2210 /* Now invoke the callbacks of any internal breakpoints there. */
2211 check_breakpoints (event_child->stop_pc);
2212
2213 /* Handle tracepoint data collecting. This may overflow the
2214 trace buffer, and cause a tracing stop, removing
2215 breakpoints. */
2216 trace_event = handle_tracepoints (event_child);
2217
2218 if (bp_explains_trap)
2219 {
2220 /* If we stepped or ran into an internal breakpoint, we've
2221 already handled it. So next time we resume (from this
2222 PC), we should step over it. */
2223 if (debug_threads)
2224 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2225
2226 if (breakpoint_here (event_child->stop_pc))
2227 event_child->need_step_over = 1;
2228 }
2229 }
2230 else
2231 {
2232 /* We have some other signal, possibly a step-over dance was in
2233 progress, and it should be cancelled too. */
2234 step_over_finished = finish_step_over (event_child);
2235 }
2236
2237 /* We have all the data we need. Either report the event to GDB, or
2238 resume threads and keep waiting for more. */
2239
2240 /* If we're collecting a fast tracepoint, finish the collection and
2241 move out of the jump pad before delivering a signal. See
2242 linux_stabilize_threads. */
2243
2244 if (WIFSTOPPED (w)
2245 && WSTOPSIG (w) != SIGTRAP
2246 && supports_fast_tracepoints ()
2247 && in_process_agent_loaded ())
2248 {
2249 if (debug_threads)
2250 fprintf (stderr,
2251 "Got signal %d for LWP %ld. Check if we need "
2252 "to defer or adjust it.\n",
2253 WSTOPSIG (w), lwpid_of (event_child));
2254
2255 /* Allow debugging the jump pad itself. */
2256 if (current_inferior->last_resume_kind != resume_step
2257 && maybe_move_out_of_jump_pad (event_child, &w))
2258 {
2259 enqueue_one_deferred_signal (event_child, &w);
2260
2261 if (debug_threads)
2262 fprintf (stderr,
2263 "Signal %d for LWP %ld deferred (in jump pad)\n",
2264 WSTOPSIG (w), lwpid_of (event_child));
2265
2266 linux_resume_one_lwp (event_child, 0, 0, NULL);
2267 goto retry;
2268 }
2269 }
2270
2271 if (event_child->collecting_fast_tracepoint)
2272 {
2273 if (debug_threads)
2274 fprintf (stderr, "\
2275 LWP %ld was trying to move out of the jump pad (%d). \
2276 Check if we're already there.\n",
2277 lwpid_of (event_child),
2278 event_child->collecting_fast_tracepoint);
2279
2280 trace_event = 1;
2281
2282 event_child->collecting_fast_tracepoint
2283 = linux_fast_tracepoint_collecting (event_child, NULL);
2284
2285 if (event_child->collecting_fast_tracepoint != 1)
2286 {
2287 /* No longer need this breakpoint. */
2288 if (event_child->exit_jump_pad_bkpt != NULL)
2289 {
2290 if (debug_threads)
2291 fprintf (stderr,
2292 "No longer need exit-jump-pad bkpt; removing it."
2293 "stopping all threads momentarily.\n");
2294
2295 /* Other running threads could hit this breakpoint.
2296 We don't handle moribund locations like GDB does,
2297 instead we always pause all threads when removing
2298 breakpoints, so that any step-over or
2299 decr_pc_after_break adjustment is always taken
2300 care of while the breakpoint is still
2301 inserted. */
2302 stop_all_lwps (1, event_child);
2303 cancel_breakpoints ();
2304
2305 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2306 event_child->exit_jump_pad_bkpt = NULL;
2307
2308 unstop_all_lwps (1, event_child);
2309
2310 gdb_assert (event_child->suspended >= 0);
2311 }
2312 }
2313
2314 if (event_child->collecting_fast_tracepoint == 0)
2315 {
2316 if (debug_threads)
2317 fprintf (stderr,
2318 "fast tracepoint finished "
2319 "collecting successfully.\n");
2320
2321 /* We may have a deferred signal to report. */
2322 if (dequeue_one_deferred_signal (event_child, &w))
2323 {
2324 if (debug_threads)
2325 fprintf (stderr, "dequeued one signal.\n");
2326 }
2327 else
2328 {
2329 if (debug_threads)
2330 fprintf (stderr, "no deferred signals.\n");
2331
2332 if (stabilizing_threads)
2333 {
2334 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2335 ourstatus->value.sig = TARGET_SIGNAL_0;
2336 return ptid_of (event_child);
2337 }
2338 }
2339 }
2340 }
2341
2342 /* Check whether GDB would be interested in this event. */
2343
2344 /* If GDB is not interested in this signal, don't stop other
2345 threads, and don't report it to GDB. Just resume the inferior
2346 right away. We do this for threading-related signals as well as
2347 any that GDB specifically requested we ignore. But never ignore
2348 SIGSTOP if we sent it ourselves, and do not ignore signals when
2349 stepping - they may require special handling to skip the signal
2350 handler. */
2351 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2352 thread library? */
2353 if (WIFSTOPPED (w)
2354 && current_inferior->last_resume_kind != resume_step
2355 && (
2356 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2357 (current_process ()->private->thread_db != NULL
2358 && (WSTOPSIG (w) == __SIGRTMIN
2359 || WSTOPSIG (w) == __SIGRTMIN + 1))
2360 ||
2361 #endif
2362 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2363 && !(WSTOPSIG (w) == SIGSTOP
2364 && current_inferior->last_resume_kind == resume_stop))))
2365 {
2366 siginfo_t info, *info_p;
2367
2368 if (debug_threads)
2369 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2370 WSTOPSIG (w), lwpid_of (event_child));
2371
2372 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2373 info_p = &info;
2374 else
2375 info_p = NULL;
2376 linux_resume_one_lwp (event_child, event_child->stepping,
2377 WSTOPSIG (w), info_p);
2378 goto retry;
2379 }
2380
2381 /* If GDB wanted this thread to single step, we always want to
2382 report the SIGTRAP, and let GDB handle it. Watchpoints should
2383 always be reported. So should signals we can't explain. A
2384 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2385 may not support Z0 breakpoints. If we do, we'll be able to handle
2386 GDB breakpoints on top of internal breakpoints, by handling the
2387 internal breakpoint and still reporting the event to GDB. If we
2388 don't, we're out of luck; GDB won't see the breakpoint hit. */
2389 report_to_gdb = (!maybe_internal_trap
2390 || current_inferior->last_resume_kind == resume_step
2391 || event_child->stopped_by_watchpoint
2392 || (!step_over_finished
2393 && !bp_explains_trap && !trace_event)
2394 || gdb_breakpoint_here (event_child->stop_pc));
2395
2396 /* We found no reason GDB would want us to stop. We either hit one
2397 of our own breakpoints, or finished an internal step GDB
2398 shouldn't know about. */
2399 if (!report_to_gdb)
2400 {
2401 if (debug_threads)
2402 {
2403 if (bp_explains_trap)
2404 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2405 if (step_over_finished)
2406 fprintf (stderr, "Step-over finished.\n");
2407 if (trace_event)
2408 fprintf (stderr, "Tracepoint event.\n");
2409 }
2410
2411 /* We're not reporting this breakpoint to GDB, so apply the
2412 decr_pc_after_break adjustment to the inferior's regcache
2413 ourselves. */
2414
2415 if (the_low_target.set_pc != NULL)
2416 {
2417 struct regcache *regcache
2418 = get_thread_regcache (get_lwp_thread (event_child), 1);
2419 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2420 }
2421
2422 /* We may have finished stepping over a breakpoint. If so,
2423 we've stopped and suspended all LWPs momentarily except the
2424 stepping one. This is where we resume them all again. We're
2425 going to keep waiting, so use proceed, which handles stepping
2426 over the next breakpoint. */
2427 if (debug_threads)
2428 fprintf (stderr, "proceeding all threads.\n");
2429
2430 if (step_over_finished)
2431 unsuspend_all_lwps (event_child);
2432
2433 proceed_all_lwps ();
2434 goto retry;
2435 }
2436
2437 if (debug_threads)
2438 {
2439 if (current_inferior->last_resume_kind == resume_step)
2440 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2441 if (event_child->stopped_by_watchpoint)
2442 fprintf (stderr, "Stopped by watchpoint.\n");
2443 if (gdb_breakpoint_here (event_child->stop_pc))
2444 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2445 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2447 }
2448
2449 /* Alright, we're going to report a stop. */
2450
2451 if (!non_stop && !stabilizing_threads)
2452 {
2453 /* In all-stop, stop all threads. */
2454 stop_all_lwps (0, NULL);
2455
2456 /* If we're not waiting for a specific LWP, choose an event LWP
2457 from among those that have had events. Giving equal priority
2458 to all LWPs that have had events helps prevent
2459 starvation. */
2460 if (ptid_equal (ptid, minus_one_ptid))
2461 {
2462 event_child->status_pending_p = 1;
2463 event_child->status_pending = w;
2464
2465 select_event_lwp (&event_child);
2466
2467 event_child->status_pending_p = 0;
2468 w = event_child->status_pending;
2469 }
2470
2471 /* Now that we've selected our final event LWP, cancel any
2472 breakpoints in other LWPs that have hit a GDB breakpoint.
2473 See the comment in cancel_breakpoints_callback to find out
2474 why. */
2475 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2476
2477 /* Stabilize threads (move out of jump pads). */
2478 stabilize_threads ();
2479 }
2480 else
2481 {
2482 /* If we just finished a step-over, then all threads had been
2483 momentarily paused. In all-stop, that's fine, we want
2484 threads stopped by now anyway. In non-stop, we need to
2485 re-resume threads that GDB wanted to be running. */
2486 if (step_over_finished)
2487 unstop_all_lwps (1, event_child);
2488 }
2489
2490 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2491
2492 if (current_inferior->last_resume_kind == resume_stop
2493 && WSTOPSIG (w) == SIGSTOP)
2494 {
2495 /* GDB requested this thread to stop with vCont;t, and it stopped
2496 cleanly, so report the stop as SIG0. The use of
2497 SIGSTOP is an implementation detail. */
2498 ourstatus->value.sig = TARGET_SIGNAL_0;
2499 }
2500 else if (current_inferior->last_resume_kind == resume_stop
2501 && WSTOPSIG (w) != SIGSTOP)
2502 {
2503 /* GDB requested this thread to stop with vCont;t, but it stopped
2504 for some other reason. */
2505 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2506 }
2507 else
2508 {
2509 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2510 }
2511
2512 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2513
2514 if (debug_threads)
2515 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2516 target_pid_to_str (ptid_of (event_child)),
2517 ourstatus->kind,
2518 ourstatus->value.sig);
2519
2520 return ptid_of (event_child);
2521 }
2522
2523 /* Get rid of any pending event in the pipe. */
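/* Note the loop below only exits once read fails with an errno other
than EINTR --- in practice EAGAIN, which relies on the event pipe
having been made non-blocking when async mode was enabled. */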
2524 static void
2525 async_file_flush (void)
2526 {
2527 int ret;
2528 char buf;
2529
2530 do
2531 ret = read (linux_event_pipe[0], &buf, 1);
2532 while (ret >= 0 || (ret == -1 && errno == EINTR));
2533 }
2534
2535 /* Put something in the pipe, so the event loop wakes up. */
2536 static void
2537 async_file_mark (void)
2538 {
2539 int ret;
2540
2541 async_file_flush ();
2542
2543 do
2544 ret = write (linux_event_pipe[1], "+", 1);
2545 while (ret == 0 || (ret == -1 && errno == EINTR));
2546
2547 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2548 be awakened anyway. */
2549 }
2550
2551 static ptid_t
2552 linux_wait (ptid_t ptid,
2553 struct target_waitstatus *ourstatus, int target_options)
2554 {
2555 ptid_t event_ptid;
2556
2557 if (debug_threads)
2558 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2559
2560 /* Flush the async file first. */
2561 if (target_is_async_p ())
2562 async_file_flush ();
2563
2564 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2565
2566 /* If at least one stop was reported, there may be more. A single
2567 SIGCHLD can signal more than one child stop. */
2568 if (target_is_async_p ()
2569 && (target_options & TARGET_WNOHANG) != 0
2570 && !ptid_equal (event_ptid, null_ptid))
2571 async_file_mark ();
2572
2573 return event_ptid;
2574 }
2575
2576 /* Send a signal to an LWP. */
2577
2578 static int
2579 kill_lwp (unsigned long lwpid, int signo)
2580 {
2581 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2582 fails, then we are not using nptl threads and we should be using kill. */
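/* With NPTL, all threads of a process share one PID, and a plain
kill is delivered to the thread group as a whole; tkill is what
lets us target one specific LWP. With LinuxThreads each thread
has its own PID, so kill already reaches the intended thread. */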
2583
2584 #ifdef __NR_tkill
2585 {
2586 static int tkill_failed;
2587
2588 if (!tkill_failed)
2589 {
2590 int ret;
2591
2592 errno = 0;
2593 ret = syscall (__NR_tkill, lwpid, signo);
2594 if (errno != ENOSYS)
2595 return ret;
2596 tkill_failed = 1;
2597 }
2598 }
2599 #endif
2600
2601 return kill (lwpid, signo);
2602 }
2603
2604 void
2605 linux_stop_lwp (struct lwp_info *lwp)
2606 {
2607 send_sigstop (lwp);
2608 }
2609
2610 static void
2611 send_sigstop (struct lwp_info *lwp)
2612 {
2613 int pid;
2614
2615 pid = lwpid_of (lwp);
2616
2617 /* If we already have a pending stop signal for this LWP, don't
2618 send another. */
2619 if (lwp->stop_expected)
2620 {
2621 if (debug_threads)
2622 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2623
2624 return;
2625 }
2626
2627 if (debug_threads)
2628 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2629
2630 lwp->stop_expected = 1;
2631 kill_lwp (pid, SIGSTOP);
2632 }
2633
2634 static int
2635 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2636 {
2637 struct lwp_info *lwp = (struct lwp_info *) entry;
2638
2639 /* Ignore EXCEPT. */
2640 if (lwp == except)
2641 return 0;
2642
2643 if (lwp->stopped)
2644 return 0;
2645
2646 send_sigstop (lwp);
2647 return 0;
2648 }
2649
2650 /* Increment the suspend count of an LWP, and stop it, if not stopped
2651 yet. */
2652 static int
2653 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2654 void *except)
2655 {
2656 struct lwp_info *lwp = (struct lwp_info *) entry;
2657
2658 /* Ignore EXCEPT. */
2659 if (lwp == except)
2660 return 0;
2661
2662 lwp->suspended++;
2663
2664 return send_sigstop_callback (entry, except);
2665 }
2666
2667 static void
2668 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2669 {
2670 /* It's dead, really. */
2671 lwp->dead = 1;
2672
2673 /* Store the exit status for later. */
2674 lwp->status_pending_p = 1;
2675 lwp->status_pending = wstat;
2676
2677 /* Prevent trying to stop it. */
2678 lwp->stopped = 1;
2679
2680 /* No further stops are expected from a dead lwp. */
2681 lwp->stop_expected = 0;
2682 }
2683
2684 static void
2685 wait_for_sigstop (struct inferior_list_entry *entry)
2686 {
2687 struct lwp_info *lwp = (struct lwp_info *) entry;
2688 struct thread_info *saved_inferior;
2689 int wstat;
2690 ptid_t saved_tid;
2691 ptid_t ptid;
2692 int pid;
2693
2694 if (lwp->stopped)
2695 {
2696 if (debug_threads)
2697 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2698 lwpid_of (lwp));
2699 return;
2700 }
2701
2702 saved_inferior = current_inferior;
2703 if (saved_inferior != NULL)
2704 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2705 else
2706 saved_tid = null_ptid; /* avoid bogus unused warning */
2707
2708 ptid = lwp->head.id;
2709
2710 if (debug_threads)
2711 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2712
2713 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2714
2715 /* If we stopped with a non-SIGSTOP signal, save it for later
2716 and record the pending SIGSTOP. If the process exited, just
2717 return. */
2718 if (WIFSTOPPED (wstat))
2719 {
2720 if (debug_threads)
2721 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2722 lwpid_of (lwp), WSTOPSIG (wstat));
2723
2724 if (WSTOPSIG (wstat) != SIGSTOP)
2725 {
2726 if (debug_threads)
2727 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2728 lwpid_of (lwp), wstat);
2729
2730 lwp->status_pending_p = 1;
2731 lwp->status_pending = wstat;
2732 }
2733 }
2734 else
2735 {
2736 if (debug_threads)
2737 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2738
2739 lwp = find_lwp_pid (pid_to_ptid (pid));
2740 if (lwp)
2741 {
2742 /* Leave this status pending for the next time we're able to
2743 report it. In the mean time, we'll report this lwp as
2744 dead to GDB, so GDB doesn't try to read registers and
2745 memory from it. This can only happen if this was the
2746 last thread of the process; otherwise, PID is removed
2747 from the thread tables before linux_wait_for_event
2748 returns. */
2749 mark_lwp_dead (lwp, wstat);
2750 }
2751 }
2752
2753 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2754 current_inferior = saved_inferior;
2755 else
2756 {
2757 if (debug_threads)
2758 fprintf (stderr, "Previously current thread died.\n");
2759
2760 if (non_stop)
2761 {
2762 /* We can't change the current inferior behind GDB's back,
2763 otherwise, a subsequent command may apply to the wrong
2764 process. */
2765 current_inferior = NULL;
2766 }
2767 else
2768 {
2769 /* Set a valid thread as current. */
2770 set_desired_inferior (0);
2771 }
2772 }
2773 }
2774
2775 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2776 move it out, because we need to report the stop event to GDB. For
2777 example, if the user puts a breakpoint in the jump pad, it's
2778 because she wants to debug it. */
2779
2780 static int
2781 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2782 {
2783 struct lwp_info *lwp = (struct lwp_info *) entry;
2784 struct thread_info *thread = get_lwp_thread (lwp);
2785
2786 gdb_assert (lwp->suspended == 0);
2787 gdb_assert (lwp->stopped);
2788
2789 /* Allow debugging the jump pad, gdb_collect, etc.. */
2790 return (supports_fast_tracepoints ()
2791 && in_process_agent_loaded ()
2792 && (gdb_breakpoint_here (lwp->stop_pc)
2793 || lwp->stopped_by_watchpoint
2794 || thread->last_resume_kind == resume_step)
2795 && linux_fast_tracepoint_collecting (lwp, NULL));
2796 }
2797
2798 static void
2799 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2800 {
2801 struct lwp_info *lwp = (struct lwp_info *) entry;
2802 struct thread_info *thread = get_lwp_thread (lwp);
2803 int *wstat;
2804
2805 gdb_assert (lwp->suspended == 0);
2806 gdb_assert (lwp->stopped);
2807
2808 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2809
2810 /* Allow debugging the jump pad, gdb_collect, etc. */
2811 if (!gdb_breakpoint_here (lwp->stop_pc)
2812 && !lwp->stopped_by_watchpoint
2813 && thread->last_resume_kind != resume_step
2814 && maybe_move_out_of_jump_pad (lwp, wstat))
2815 {
2816 if (debug_threads)
2817 fprintf (stderr,
2818 "LWP %ld needs stabilizing (in jump pad)\n",
2819 lwpid_of (lwp));
2820
2821 if (wstat)
2822 {
2823 lwp->status_pending_p = 0;
2824 enqueue_one_deferred_signal (lwp, wstat);
2825
2826 if (debug_threads)
2827 fprintf (stderr,
2828 "Signal %d for LWP %ld deferred "
2829 "(in jump pad)\n",
2830 WSTOPSIG (*wstat), lwpid_of (lwp));
2831 }
2832
2833 linux_resume_one_lwp (lwp, 0, 0, NULL);
2834 }
2835 else
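/* This LWP doesn't need moving; leave it parked where it is, and
bump its suspend count so the blanket unsuspend at the end of
linux_stabilize_threads brings it back to zero. */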
2836 lwp->suspended++;
2837 }
2838
2839 static int
2840 lwp_running (struct inferior_list_entry *entry, void *data)
2841 {
2842 struct lwp_info *lwp = (struct lwp_info *) entry;
2843
2844 if (lwp->dead)
2845 return 0;
2846 if (lwp->stopped)
2847 return 0;
2848 return 1;
2849 }
2850
2851 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2852 If SUSPEND, then also increase the suspend count of every LWP,
2853 except EXCEPT. */
2854
2855 static void
2856 stop_all_lwps (int suspend, struct lwp_info *except)
2857 {
2858 stopping_threads = 1;
2859
2860 if (suspend)
2861 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2862 else
2863 find_inferior (&all_lwps, send_sigstop_callback, except);
2864 for_each_inferior (&all_lwps, wait_for_sigstop);
2865 stopping_threads = 0;
2866 }
2867
2868 /* Resume execution of the inferior process.
2869 If STEP is nonzero, single-step it.
2870 If SIGNAL is nonzero, give it that signal. */
2871
2872 static void
2873 linux_resume_one_lwp (struct lwp_info *lwp,
2874 int step, int signal, siginfo_t *info)
2875 {
2876 struct thread_info *saved_inferior;
2877 int fast_tp_collecting;
2878
2879 if (lwp->stopped == 0)
2880 return;
2881
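/* COLLECTING_FAST_TRACEPOINT is 0 when the LWP is not in a fast
tracepoint jump pad; the nonzero values (1 and 2, handled further
below) indicate how much work remains to move it out of the pad. */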
2882 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2883
2884 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2885
2886 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2887 user used the "jump" command, or "set $pc = foo"). */
2888 if (lwp->stop_pc != get_pc (lwp))
2889 {
2890 /* Collecting 'while-stepping' actions doesn't make sense
2891 anymore. */
2892 release_while_stepping_state_list (get_lwp_thread (lwp));
2893 }
2894
2895 /* If we have pending signals or status, and a new signal, enqueue the
2896 signal. Also enqueue the signal if we are waiting to reinsert a
2897 breakpoint; it will be picked up again below. */
2898 if (signal != 0
2899 && (lwp->status_pending_p
2900 || lwp->pending_signals != NULL
2901 || lwp->bp_reinsert != 0
2902 || fast_tp_collecting))
2903 {
2904 struct pending_signals *p_sig;
2905 p_sig = xmalloc (sizeof (*p_sig));
2906 p_sig->prev = lwp->pending_signals;
2907 p_sig->signal = signal;
2908 if (info == NULL)
2909 memset (&p_sig->info, 0, sizeof (siginfo_t));
2910 else
2911 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2912 lwp->pending_signals = p_sig;
2913 }
2914
2915 if (lwp->status_pending_p)
2916 {
2917 if (debug_threads)
2918 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2919 " has pending status\n",
2920 lwpid_of (lwp), step ? "step" : "continue", signal,
2921 lwp->stop_expected ? "expected" : "not expected");
2922 return;
2923 }
2924
2925 saved_inferior = current_inferior;
2926 current_inferior = get_lwp_thread (lwp);
2927
2928 if (debug_threads)
2929 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2930 lwpid_of (lwp), step ? "step" : "continue", signal,
2931 lwp->stop_expected ? "expected" : "not expected");
2932
2933 /* This bit needs some thinking about. If we get a signal that
2934 we must report while a single-step reinsert is still pending,
2935 we often end up resuming the thread. It might be better to
2936 (ew) allow a stack of pending events; then we could be sure that
2937 the reinsert happened right away and not lose any signals.
2938
2939 Making this stack would also shrink the window in which breakpoints are
2940 uninserted (see comment in linux_wait_for_lwp) but not enough for
2941 complete correctness, so it won't solve that problem. It may be
2942 worthwhile just to solve this one, however. */
2943 if (lwp->bp_reinsert != 0)
2944 {
2945 if (debug_threads)
2946 fprintf (stderr, " pending reinsert at 0x%s\n",
2947 paddress (lwp->bp_reinsert));
2948
2949 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2950 {
2951 if (fast_tp_collecting == 0)
2952 {
2953 if (step == 0)
2954 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2955 if (lwp->suspended)
2956 fprintf (stderr, "BAD - reinserting and suspended (%d).\n",
2957 lwp->suspended);
2958 }
2959
2960 step = 1;
2961 }
2962
2963 /* Postpone any pending signal. It was enqueued above. */
2964 signal = 0;
2965 }
2966
2967 if (fast_tp_collecting == 1)
2968 {
2969 if (debug_threads)
2970 fprintf (stderr, "\
2971 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2972 lwpid_of (lwp));
2973
2974 /* Postpone any pending signal. It was enqueued above. */
2975 signal = 0;
2976 }
2977 else if (fast_tp_collecting == 2)
2978 {
2979 if (debug_threads)
2980 fprintf (stderr, "\
2981 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2982 lwpid_of (lwp));
2983
2984 if (can_hardware_single_step ())
2985 step = 1;
2986 else
2987 fatal ("moving out of jump pad single-stepping"
2988 " not implemented on this target");
2989
2990 /* Postpone any pending signal. It was enqueued above. */
2991 signal = 0;
2992 }
2993
2994 /* If we have while-stepping actions in this thread, set it stepping.
2995 If we have a signal to deliver, it may or may not be set to
2996 SIG_IGN, we don't know. Assume so, and allow collecting
2997 while-stepping into a signal handler. A possible smart thing to
2998 do would be to set an internal breakpoint at the signal return
2999 address, continue, and carry on catching this while-stepping
3000 action only when that breakpoint is hit. A future
3001 enhancement. */
3002 if (get_lwp_thread (lwp)->while_stepping != NULL
3003 && can_hardware_single_step ())
3004 {
3005 if (debug_threads)
3006 fprintf (stderr,
3007 "lwp %ld has a while-stepping action -> forcing step.\n",
3008 lwpid_of (lwp));
3009 step = 1;
3010 }
3011
3012 if (debug_threads && the_low_target.get_pc != NULL)
3013 {
3014 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3015 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3016 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3017 }
3018
3019 /* If we have pending signals, consume one unless we are trying to
3020 reinsert a breakpoint or we're trying to finish a fast tracepoint
3021 collect. */
3022 if (lwp->pending_signals != NULL
3023 && lwp->bp_reinsert == 0
3024 && fast_tp_collecting == 0)
3025 {
3026 struct pending_signals **p_sig;
3027
3028 p_sig = &lwp->pending_signals;
3029 while ((*p_sig)->prev != NULL)
3030 p_sig = &(*p_sig)->prev;
3031
3032 signal = (*p_sig)->signal;
3033 if ((*p_sig)->info.si_signo != 0)
3034 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3035
3036 free (*p_sig);
3037 *p_sig = NULL;
3038 }
3039
3040 if (the_low_target.prepare_to_resume != NULL)
3041 the_low_target.prepare_to_resume (lwp);
3042
3043 regcache_invalidate_one ((struct inferior_list_entry *)
3044 get_lwp_thread (lwp));
3045 errno = 0;
3046 lwp->stopped = 0;
3047 lwp->stopped_by_watchpoint = 0;
3048 lwp->stepping = step;
3049 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3050 /* Coerce to a uintptr_t first to avoid potential gcc warning
3051 of coercing an 8 byte integer to a 4 byte pointer. */
3052 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3053
3054 current_inferior = saved_inferior;
3055 if (errno)
3056 {
3057 /* ESRCH from ptrace either means that the thread was already
3058 running (an error) or that it is gone (a race condition). If
3059 it's gone, we will get a notification the next time we wait,
3060 so we can ignore the error. We could differentiate these
3061 two, but it's tricky without waiting; the thread still exists
3062 as a zombie, so sending it signal 0 would succeed. So just
3063 ignore ESRCH. */
3064 if (errno == ESRCH)
3065 return;
3066
3067 perror_with_name ("ptrace");
3068 }
3069 }
3070
3071 struct thread_resume_array
3072 {
3073 struct thread_resume *resume;
3074 size_t n;
3075 };
3076
3077 /* This function is called once per thread. We look up the thread
3078 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3079 resume request.
3080
3081 This algorithm is O(threads * resume elements), but the number of
3082 resume elements is small (and will remain small at least until GDB
3083 supports thread suspension). */
3084 static int
3085 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3086 {
3087 struct lwp_info *lwp;
3088 struct thread_info *thread;
3089 int ndx;
3090 struct thread_resume_array *r;
3091
3092 thread = (struct thread_info *) entry;
3093 lwp = get_thread_lwp (thread);
3094 r = arg;
3095
3096 for (ndx = 0; ndx < r->n; ndx++)
3097 {
3098 ptid_t ptid = r->resume[ndx].thread;
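/* A resume request applies to this thread if it names all threads
(minus_one_ptid), this thread exactly, or this thread's whole
process (either as a pid-only ptid, or with an LWP field of -1). */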
3099 if (ptid_equal (ptid, minus_one_ptid)
3100 || ptid_equal (ptid, entry->id)
3101 || (ptid_is_pid (ptid)
3102 && (ptid_get_pid (ptid) == pid_of (lwp)))
3103 || (ptid_get_lwp (ptid) == -1
3104 && (ptid_get_pid (ptid) == pid_of (lwp))))
3105 {
3106 if (r->resume[ndx].kind == resume_stop
3107 && thread->last_resume_kind == resume_stop)
3108 {
3109 if (debug_threads)
3110 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3111 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3112 ? "stopped"
3113 : "stopping",
3114 lwpid_of (lwp));
3115
3116 continue;
3117 }
3118
3119 lwp->resume = &r->resume[ndx];
3120 thread->last_resume_kind = lwp->resume->kind;
3121
3122 /* If we had a deferred signal to report, dequeue one now.
3123 This can happen if LWP gets more than one signal while
3124 trying to get out of a jump pad. */
3125 if (lwp->stopped
3126 && !lwp->status_pending_p
3127 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3128 {
3129 lwp->status_pending_p = 1;
3130
3131 if (debug_threads)
3132 fprintf (stderr,
3133 "Dequeueing deferred signal %d for LWP %ld, "
3134 "leaving status pending.\n",
3135 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3136 }
3137
3138 return 0;
3139 }
3140 }
3141
3142 /* No resume action for this thread. */
3143 lwp->resume = NULL;
3144
3145 return 0;
3146 }
3147
3148
3149 /* Set *FLAG_P if this lwp has an interesting status pending. */
3150 static int
3151 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3152 {
3153 struct lwp_info *lwp = (struct lwp_info *) entry;
3154
3155 /* LWPs which will not be resumed are not interesting, because
3156 we might not wait for them next time through linux_wait. */
3157 if (lwp->resume == NULL)
3158 return 0;
3159
3160 if (lwp->status_pending_p)
3161 * (int *) flag_p = 1;
3162
3163 return 0;
3164 }
3165
3166 /* Return 1 if this lwp that GDB wants running is stopped at an
3167 internal breakpoint that we need to step over. It assumes that any
3168 required STOP_PC adjustment has already been propagated to the
3169 inferior's regcache. */
3170
3171 static int
3172 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3173 {
3174 struct lwp_info *lwp = (struct lwp_info *) entry;
3175 struct thread_info *thread;
3176 struct thread_info *saved_inferior;
3177 CORE_ADDR pc;
3178
3179 /* LWPs which will not be resumed are not interesting, because we
3180 might not wait for them next time through linux_wait. */
3181
3182 if (!lwp->stopped)
3183 {
3184 if (debug_threads)
3185 fprintf (stderr,
3186 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3187 lwpid_of (lwp));
3188 return 0;
3189 }
3190
3191 thread = get_lwp_thread (lwp);
3192
3193 if (thread->last_resume_kind == resume_stop)
3194 {
3195 if (debug_threads)
3196 fprintf (stderr,
3197 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3198 lwpid_of (lwp));
3199 return 0;
3200 }
3201
3202 gdb_assert (lwp->suspended >= 0);
3203
3204 if (lwp->suspended)
3205 {
3206 if (debug_threads)
3207 fprintf (stderr,
3208 "Need step over [LWP %ld]? Ignoring, suspended\n",
3209 lwpid_of (lwp));
3210 return 0;
3211 }
3212
3213 if (!lwp->need_step_over)
3214 {
3215 if (debug_threads)
3216 fprintf (stderr,
3217 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3218 }
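/* Note: no early return here. Even when NEED_STEP_OVER is clear,
the PC and breakpoint checks below still make the final decision. */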
3219
3220 if (lwp->status_pending_p)
3221 {
3222 if (debug_threads)
3223 fprintf (stderr,
3224 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3225 lwpid_of (lwp));
3226 return 0;
3227 }
3228
3229 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3230 or we have. */
3231 pc = get_pc (lwp);
3232
3233 /* If the PC has changed since we stopped, then don't do anything,
3234 and let the breakpoint/tracepoint be hit. This happens if, for
3235 instance, GDB handled the decr_pc_after_break subtraction itself,
3236 GDB is OOL stepping this thread, or the user has issued a "jump"
3237 command, or poked the thread's registers herself. */
3238 if (pc != lwp->stop_pc)
3239 {
3240 if (debug_threads)
3241 fprintf (stderr,
3242 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3243 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3244 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3245
3246 lwp->need_step_over = 0;
3247 return 0;
3248 }
3249
3250 saved_inferior = current_inferior;
3251 current_inferior = thread;
3252
3253 /* We can only step over breakpoints we know about. */
3254 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3255 {
3256 /* Don't step over a breakpoint that GDB expects to hit
3257 though. */
3258 if (gdb_breakpoint_here (pc))
3259 {
3260 if (debug_threads)
3261 fprintf (stderr,
3262 "Need step over [LWP %ld]? yes, but found"
3263 " GDB breakpoint at 0x%s; skipping step over\n",
3264 lwpid_of (lwp), paddress (pc));
3265
3266 current_inferior = saved_inferior;
3267 return 0;
3268 }
3269 else
3270 {
3271 if (debug_threads)
3272 fprintf (stderr,
3273 "Need step over [LWP %ld]? yes, "
3274 "found breakpoint at 0x%s\n",
3275 lwpid_of (lwp), paddress (pc));
3276
3277 /* We've found an lwp that needs stepping over --- return 1 so
3278 that find_inferior stops looking. */
3279 current_inferior = saved_inferior;
3280
3281 /* If the step over is cancelled, this is set again. */
3282 lwp->need_step_over = 0;
3283 return 1;
3284 }
3285 }
3286
3287 current_inferior = saved_inferior;
3288
3289 if (debug_threads)
3290 fprintf (stderr,
3291 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3292 lwpid_of (lwp), paddress (pc));
3293
3294 return 0;
3295 }
3296
3297 /* Start a step-over operation on LWP. When LWP stops at a
3298 breakpoint, to make progress, we need to move the breakpoint out
3299 of the way. If we let other threads run while we do that, they may
3300 pass by the breakpoint location and miss hitting it. To avoid
3301 that, a step-over momentarily stops all threads, and single-steps
3302 LWP with the breakpoint temporarily uninserted from
3303 the inferior. When the single-step finishes, we reinsert the
3304 breakpoint, and let all threads that are supposed to be running,
3305 run again.
3306
3307 On targets that don't support hardware single-step, we don't
3308 currently support full software single-stepping. Instead, we only
3309 support stepping over the thread event breakpoint, by asking the
3310 low target where to place a reinsert breakpoint. Since this
3311 routine assumes the breakpoint being stepped over is a thread event
3312 breakpoint, it usually assumes the return address of the current
3313 function is a good enough place to set the reinsert breakpoint. */
3314
3315 static int
3316 start_step_over (struct lwp_info *lwp)
3317 {
3318 struct thread_info *saved_inferior;
3319 CORE_ADDR pc;
3320 int step;
3321
3322 if (debug_threads)
3323 fprintf (stderr,
3324 "Starting step-over on LWP %ld. Stopping all threads\n",
3325 lwpid_of (lwp));
3326
3327 stop_all_lwps (1, lwp);
3328 gdb_assert (lwp->suspended == 0);
3329
3330 if (debug_threads)
3331 fprintf (stderr, "Done stopping all threads for step-over.\n");
3332
3333 /* Note, we should always reach here with an already adjusted PC,
3334 either by GDB (if we're resuming due to GDB's request), or by our
3335 caller, if we just finished handling an internal breakpoint GDB
3336 shouldn't care about. */
3337 pc = get_pc (lwp);
3338
3339 saved_inferior = current_inferior;
3340 current_inferior = get_lwp_thread (lwp);
3341
3342 lwp->bp_reinsert = pc;
3343 uninsert_breakpoints_at (pc);
3344 uninsert_fast_tracepoint_jumps_at (pc);
3345
3346 if (can_hardware_single_step ())
3347 {
3348 step = 1;
3349 }
3350 else
3351 {
3352 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3353 set_reinsert_breakpoint (raddr);
3354 step = 0;
3355 }
3356
3357 current_inferior = saved_inferior;
3358
3359 linux_resume_one_lwp (lwp, step, 0, NULL);
3360
3361 /* Require next event from this LWP. */
3362 step_over_bkpt = lwp->head.id;
3363 return 1;
3364 }
3365
3366 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3367 start_step_over, if still there, and delete any reinsert
3368 breakpoints we've set, on targets without hardware single-step. */
3369
3370 static int
3371 finish_step_over (struct lwp_info *lwp)
3372 {
3373 if (lwp->bp_reinsert != 0)
3374 {
3375 if (debug_threads)
3376 fprintf (stderr, "Finished step over.\n");
3377
3378 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3379 may be no breakpoint to reinsert there by now. */
3380 reinsert_breakpoints_at (lwp->bp_reinsert);
3381 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3382
3383 lwp->bp_reinsert = 0;
3384
3385 /* Delete any software-single-step reinsert breakpoints. No
3386 longer needed. We don't have to worry about other threads
3387 hitting this trap, and later not being able to explain it,
3388 because we were stepping over a breakpoint, and we hold all
3389 threads but LWP stopped while doing that. */
3390 if (!can_hardware_single_step ())
3391 delete_reinsert_breakpoints ();
3392
3393 step_over_bkpt = null_ptid;
3394 return 1;
3395 }
3396 else
3397 return 0;
3398 }
3399
3400 /* This function is called once per thread. We check the thread's resume
3401 request, which will tell us whether to resume, step, or leave the thread
3402 stopped; and what signal, if any, it should be sent.
3403
3404 For threads which we aren't explicitly told otherwise, we preserve
3405 the stepping flag; this is used for stepping over gdbserver-placed
3406 breakpoints.
3407
3408 If pending_flags was set in any thread, we queue any needed
3409 signals, since we won't actually resume. We already have a pending
3410 event to report, so we don't need to preserve any step requests;
3411 they should be re-issued if necessary. */
3412
3413 static int
3414 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3415 {
3416 struct lwp_info *lwp;
3417 struct thread_info *thread;
3418 int step;
3419 int leave_all_stopped = * (int *) arg;
3420 int leave_pending;
3421
3422 thread = (struct thread_info *) entry;
3423 lwp = get_thread_lwp (thread);
3424
3425 if (lwp->resume == NULL)
3426 return 0;
3427
3428 if (lwp->resume->kind == resume_stop)
3429 {
3430 if (debug_threads)
3431 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3432
3433 if (!lwp->stopped)
3434 {
3435 if (debug_threads)
3436 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3437
3438 /* Stop the thread, and wait for the event asynchronously,
3439 through the event loop. */
3440 send_sigstop (lwp);
3441 }
3442 else
3443 {
3444 if (debug_threads)
3445 fprintf (stderr, "already stopped LWP %ld\n",
3446 lwpid_of (lwp));
3447
3448 /* The LWP may have been stopped in an internal event that
3449 was not meant to be notified back to GDB (e.g., gdbserver
3450 breakpoint), so we should be reporting a stop event in
3451 this case too. */
3452
3453 /* If the thread already has a pending SIGSTOP, this is a
3454 no-op. Otherwise, something later will presumably resume
3455 the thread and this will cause it to cancel any pending
3456 operation, due to last_resume_kind == resume_stop. If
3457 the thread already has a pending status to report, we
3458 will still report it the next time we wait - see
3459 status_pending_p_callback. */
3460
3461 /* If we already have a pending signal to report, then
3462 there's no need to queue a SIGSTOP, as this means we're
3463 midway through moving the LWP out of the jumppad, and we
3464 will report the pending signal as soon as that is
3465 finished. */
3466 if (lwp->pending_signals_to_report == NULL)
3467 send_sigstop (lwp);
3468 }
3469
3470 /* For stop requests, we're done. */
3471 lwp->resume = NULL;
3472 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3473 return 0;
3474 }
3475
3476 /* If this thread which is about to be resumed has a pending status,
3477 then don't resume any threads - we can just report the pending
3478 status. Make sure to queue any signals that would otherwise be
3479 sent. In all-stop mode, we base this decision on whether *any*
3480 thread has a pending status. If there's a thread that needs the
3481 step-over-breakpoint dance, then don't resume any other thread
3482 but that particular one. */
3483 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3484
3485 if (!leave_pending)
3486 {
3487 if (debug_threads)
3488 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3489
3490 step = (lwp->resume->kind == resume_step);
3491 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3492 }
3493 else
3494 {
3495 if (debug_threads)
3496 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3497
3498 /* If we have a new signal, enqueue the signal. */
3499 if (lwp->resume->sig != 0)
3500 {
3501 struct pending_signals *p_sig;
3502 p_sig = xmalloc (sizeof (*p_sig));
3503 p_sig->prev = lwp->pending_signals;
3504 p_sig->signal = lwp->resume->sig;
3505 memset (&p_sig->info, 0, sizeof (siginfo_t));
3506
3507 /* If this is the same signal we were previously stopped by,
3508 make sure to queue its siginfo. We can ignore the return
3509 value of ptrace; if it fails, we'll skip
3510 PTRACE_SETSIGINFO. */
3511 if (WIFSTOPPED (lwp->last_status)
3512 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3513 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3514
3515 lwp->pending_signals = p_sig;
3516 }
3517 }
3518
3519 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3520 lwp->resume = NULL;
3521 return 0;
3522 }
3523
3524 static void
3525 linux_resume (struct thread_resume *resume_info, size_t n)
3526 {
3527 struct thread_resume_array array = { resume_info, n };
3528 struct lwp_info *need_step_over = NULL;
3529 int any_pending;
3530 int leave_all_stopped;
3531
3532 find_inferior (&all_threads, linux_set_resume_request, &array);
3533
3534 /* If there is a thread which would otherwise be resumed, which has
3535 a pending status, then don't resume any threads - we can just
3536 report the pending status. Make sure to queue any signals that
3537 would otherwise be sent. In non-stop mode, we'll apply this
3538 logic to each thread individually. We consume all pending events
3539 before considering whether to start a step-over (in all-stop). */
3540 any_pending = 0;
3541 if (!non_stop)
3542 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3543
3544 /* If there is a thread which would otherwise be resumed, which is
3545 stopped at a breakpoint that needs stepping over, then don't
3546 resume any threads - have it step over the breakpoint with all
3547 other threads stopped, then resume all threads again. Make sure
3548 to queue any signals that would otherwise be delivered or
3549 queued. */
3550 if (!any_pending && supports_breakpoints ())
3551 need_step_over
3552 = (struct lwp_info *) find_inferior (&all_lwps,
3553 need_step_over_p, NULL);
3554
3555 leave_all_stopped = (need_step_over != NULL || any_pending);
3556
3557 if (debug_threads)
3558 {
3559 if (need_step_over != NULL)
3560 fprintf (stderr, "Not resuming all, need step over\n");
3561 else if (any_pending)
3562 fprintf (stderr,
3563 "Not resuming, all-stop and found "
3564 "an LWP with pending status\n");
3565 else
3566 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3567 }
3568
3569 /* Even if we're leaving threads stopped, queue all signals we'd
3570 otherwise deliver. */
3571 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3572
3573 if (need_step_over)
3574 start_step_over (need_step_over);
3575 }
3576
3577 /* This function is called once per thread. We check the thread's
3578 last resume request, which will tell us whether to resume, step, or
3579 leave the thread stopped. Any signal the client requested to be
3580 delivered has already been enqueued at this point.
3581
3582 If any thread that GDB wants running is stopped at an internal
3583 breakpoint that needs stepping over, we start a step-over operation
3584 on that particular thread, and leave all others stopped. */
3585
3586 static int
3587 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3588 {
3589 struct lwp_info *lwp = (struct lwp_info *) entry;
3590 struct thread_info *thread;
3591 int step;
3592
3593 if (lwp == except)
3594 return 0;
3595
3596 if (debug_threads)
3597 fprintf (stderr,
3598 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3599
3600 if (!lwp->stopped)
3601 {
3602 if (debug_threads)
3603 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3604 return 0;
3605 }
3606
3607 thread = get_lwp_thread (lwp);
3608
3609 if (thread->last_resume_kind == resume_stop
3610 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3611 {
3612 if (debug_threads)
3613 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3614 lwpid_of (lwp));
3615 return 0;
3616 }
3617
3618 if (lwp->status_pending_p)
3619 {
3620 if (debug_threads)
3621 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3622 lwpid_of (lwp));
3623 return 0;
3624 }
3625
3626 gdb_assert (lwp->suspended >= 0);
3627
3628 if (lwp->suspended)
3629 {
3630 if (debug_threads)
3631 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3632 return 0;
3633 }
3634
3635 if (thread->last_resume_kind == resume_stop
3636 && lwp->pending_signals_to_report == NULL
3637 && lwp->collecting_fast_tracepoint == 0)
3638 {
3639 /* We haven't reported this LWP as stopped yet (otherwise, the
3640 last_status.kind check above would catch it, and we wouldn't
3641 reach here). This LWP may have been momentarily paused by a
3642 stop_all_lwps call while handling, for example, another LWP's
3643 step-over. In that case, the pending expected SIGSTOP signal
3644 that was queued at vCont;t handling time will have already
3645 been consumed by wait_for_sigstop, and so we need to requeue
3646 another one here. Note that if the LWP already has a SIGSTOP
3647 pending, this is a no-op. */
3648
3649 if (debug_threads)
3650 fprintf (stderr,
3651 "Client wants LWP %ld to stop. "
3652 "Making sure it has a SIGSTOP pending\n",
3653 lwpid_of (lwp));
3654
3655 send_sigstop (lwp);
3656 }
3657
3658 step = thread->last_resume_kind == resume_step;
3659 linux_resume_one_lwp (lwp, step, 0, NULL);
3660 return 0;
3661 }
3662
3663 static int
3664 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3665 {
3666 struct lwp_info *lwp = (struct lwp_info *) entry;
3667
3668 if (lwp == except)
3669 return 0;
3670
3671 lwp->suspended--;
3672 gdb_assert (lwp->suspended >= 0);
3673
3674 return proceed_one_lwp (entry, except);
3675 }
3676
3677 /* When we finish a step-over, set threads running again. If there's
3678 another thread that may need a step-over, now's the time to start
3679 it. Eventually, we'll move all threads past their breakpoints. */
3680
3681 static void
3682 proceed_all_lwps (void)
3683 {
3684 struct lwp_info *need_step_over;
3685
3686 /* If there is a thread which would otherwise be resumed, which is
3687 stopped at a breakpoint that needs stepping over, then don't
3688 resume any threads - have it step over the breakpoint with all
3689 other threads stopped, then resume all threads again. */
3690
3691 if (supports_breakpoints ())
3692 {
3693 need_step_over
3694 = (struct lwp_info *) find_inferior (&all_lwps,
3695 need_step_over_p, NULL);
3696
3697 if (need_step_over != NULL)
3698 {
3699 if (debug_threads)
3700 fprintf (stderr, "proceed_all_lwps: found "
3701 "thread %ld needing a step-over\n",
3702 lwpid_of (need_step_over));
3703
3704 start_step_over (need_step_over);
3705 return;
3706 }
3707 }
3708
3709 if (debug_threads)
3710 fprintf (stderr, "Proceeding, no step-over needed\n");
3711
3712 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3713 }
3714
3715 /* Stopped LWPs that the client wanted to be running, that don't have
3716 pending statuses, are set to run again, except for EXCEPT, if not
3717 NULL. This undoes a stop_all_lwps call. */
3718
3719 static void
3720 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3721 {
3722 if (debug_threads)
3723 {
3724 if (except)
3725 fprintf (stderr,
3726 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3727 else
3728 fprintf (stderr,
3729 "unstopping all lwps\n");
3730 }
3731
3732 if (unsuspend)
3733 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3734 else
3735 find_inferior (&all_lwps, proceed_one_lwp, except);
3736 }
3737
3738 #ifdef HAVE_LINUX_USRREGS
3739
3740 int
3741 register_addr (int regnum)
3742 {
3743 int addr;
3744
3745 if (regnum < 0 || regnum >= the_low_target.num_regs)
3746 error ("Invalid register number %d.", regnum);
3747
3748 addr = the_low_target.regmap[regnum];
3749
3750 return addr;
3751 }
3752
3753 /* Fetch one register. */
3754 static void
3755 fetch_register (struct regcache *regcache, int regno)
3756 {
3757 CORE_ADDR regaddr;
3758 int i, size;
3759 char *buf;
3760 int pid;
3761
3762 if (regno >= the_low_target.num_regs)
3763 return;
3764 if ((*the_low_target.cannot_fetch_register) (regno))
3765 return;
3766
3767 regaddr = register_addr (regno);
3768 if (regaddr == -1)
3769 return;
3770
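/* Round the transfer size up to a whole number of ptrace words,
since PTRACE_PEEKUSER moves one PTRACE_XFER_TYPE at a time. */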
3771 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3772 & -sizeof (PTRACE_XFER_TYPE));
3773 buf = alloca (size);
3774
3775 pid = lwpid_of (get_thread_lwp (current_inferior));
3776 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3777 {
3778 errno = 0;
3779 *(PTRACE_XFER_TYPE *) (buf + i) =
3780 ptrace (PTRACE_PEEKUSER, pid,
3781 /* Coerce to a uintptr_t first to avoid potential gcc warning
3782 of coercing an 8 byte integer to a 4 byte pointer. */
3783 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
3784 regaddr += sizeof (PTRACE_XFER_TYPE);
3785 if (errno != 0)
3786 error ("reading register %d: %s", regno, strerror (errno));
3787 }
3788
3789 if (the_low_target.supply_ptrace_register)
3790 the_low_target.supply_ptrace_register (regcache, regno, buf);
3791 else
3792 supply_register (regcache, regno, buf);
3793 }
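/* The size computation above rounds the register's size up to a whole
   number of PTRACE_XFER_TYPE words, since PTRACE_PEEKUSER/POKEUSER
   always transfer full words.  A minimal sketch of the same round-up
   idiom follows; it is illustrative only, and XFER_SIZE merely stands
   in for sizeof (PTRACE_XFER_TYPE).  */
#if 0
#include <assert.h>

#define XFER_SIZE 8

/* Round SZ up to the next multiple of XFER_SIZE.  The mask works
   because -XFER_SIZE clears the low bits when XFER_SIZE is a power
   of two.  */
static int
round_up_to_xfer_word (int sz)
{
  return (sz + XFER_SIZE - 1) & -XFER_SIZE;
}

static void
round_up_examples (void)
{
  assert (round_up_to_xfer_word (1) == 8);   /* 1-byte register.  */
  assert (round_up_to_xfer_word (8) == 8);   /* Already aligned.  */
  assert (round_up_to_xfer_word (10) == 16); /* 10-byte x87 register.  */
}
#endif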
3794
3795 /* Store one register. */
3796 static void
3797 store_register (struct regcache *regcache, int regno)
3798 {
3799 CORE_ADDR regaddr;
3800 int i, size;
3801 char *buf;
3802 int pid;
3803
3804 if (regno >= the_low_target.num_regs)
3805 return;
3806 if ((*the_low_target.cannot_store_register) (regno))
3807 return;
3808
3809 regaddr = register_addr (regno);
3810 if (regaddr == -1)
3811 return;
3812
3813 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3814 & -sizeof (PTRACE_XFER_TYPE));
3815 buf = alloca (size);
3816 memset (buf, 0, size);
3817
3818 if (the_low_target.collect_ptrace_register)
3819 the_low_target.collect_ptrace_register (regcache, regno, buf);
3820 else
3821 collect_register (regcache, regno, buf);
3822
3823 pid = lwpid_of (get_thread_lwp (current_inferior));
3824 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3825 {
3826 errno = 0;
3827 ptrace (PTRACE_POKEUSER, pid,
3828 /* Coerce to a uintptr_t first to avoid potential gcc warning
3829 about coercing an 8 byte integer to a 4 byte pointer. */
3830 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3831 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3832 if (errno != 0)
3833 {
3834 /* At this point, ESRCH should mean the process is
3835 already gone, in which case we simply ignore attempts
3836 to change its registers. See also the related
3837 comment in linux_resume_one_lwp. */
3838 if (errno == ESRCH)
3839 return;
3840
3841 if ((*the_low_target.cannot_store_register) (regno) == 0)
3842 error ("writing register %d: %s", regno, strerror (errno));
3843 }
3844 regaddr += sizeof (PTRACE_XFER_TYPE);
3845 }
3846 }
3847
3848 /* Fetch all registers, or just one, from the child process. */
3849 static void
3850 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3851 {
3852 if (regno == -1)
3853 for (regno = 0; regno < the_low_target.num_regs; regno++)
3854 fetch_register (regcache, regno);
3855 else
3856 fetch_register (regcache, regno);
3857 }
3858
3859 /* Store our register values back into the inferior.
3860 If REGNO is -1, do this for all registers.
3861 Otherwise, REGNO specifies which register (so we can save time). */
3862 static void
3863 usr_store_inferior_registers (struct regcache *regcache, int regno)
3864 {
3865 if (regno == -1)
3866 for (regno = 0; regno < the_low_target.num_regs; regno++)
3867 store_register (regcache, regno);
3868 else
3869 store_register (regcache, regno);
3870 }
3871 #endif /* HAVE_LINUX_USRREGS */
3872
3873
3874
3875 #ifdef HAVE_LINUX_REGSETS
3876
3877 static int
3878 regsets_fetch_inferior_registers (struct regcache *regcache)
3879 {
3880 struct regset_info *regset;
3881 int saw_general_regs = 0;
3882 int pid;
3883 struct iovec iov;
3884
3885 regset = target_regsets;
3886
3887 pid = lwpid_of (get_thread_lwp (current_inferior));
3888 while (regset->size >= 0)
3889 {
3890 void *buf, *data;
3891 int nt_type, res;
3892
3893 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3894 {
3895 regset ++;
3896 continue;
3897 }
3898
3899 buf = xmalloc (regset->size);
3900
3901 nt_type = regset->nt_type;
3902 if (nt_type)
3903 {
3904 iov.iov_base = buf;
3905 iov.iov_len = regset->size;
3906 data = (void *) &iov;
3907 }
3908 else
3909 data = buf;
3910
3911 #ifndef __sparc__
3912 res = ptrace (regset->get_request, pid, nt_type, data);
3913 #else
3914 res = ptrace (regset->get_request, pid, data, nt_type);
3915 #endif
3916 if (res < 0)
3917 {
3918 if (errno == EIO)
3919 {
3920 /* If we get EIO on a regset, do not try it again for
3921 this process. */
3922 disabled_regsets[regset - target_regsets] = 1;
3923 free (buf);
3924 continue;
3925 }
3926 else
3927 {
3928 char s[256];
3929 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3930 pid);
3931 perror (s);
3932 }
3933 }
3934 else if (regset->type == GENERAL_REGS)
3935 saw_general_regs = 1;
3936 regset->store_function (regcache, buf);
3937 regset ++;
3938 free (buf);
3939 }
3940 if (saw_general_regs)
3941 return 0;
3942 else
3943 return 1;
3944 }
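/* When a regset carries an NT_* note type, the request above goes
   through the iovec-based PTRACE_GETREGSET interface rather than a
   raw buffer pointer.  A minimal sketch of that calling convention,
   assuming a kernel and libc that provide PTRACE_GETREGSET
   (Linux 2.6.34 or later); the helper name is hypothetical:  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
fetch_gregs_via_regset (pid_t tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* On success the kernel updates iov.iov_len to the number of bytes
     it actually filled in.  */
  return ptrace (PTRACE_GETREGSET, tid, NT_PRSTATUS, &iov);
}
#endif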
3945
3946 static int
3947 regsets_store_inferior_registers (struct regcache *regcache)
3948 {
3949 struct regset_info *regset;
3950 int saw_general_regs = 0;
3951 int pid;
3952 struct iovec iov;
3953
3954 regset = target_regsets;
3955
3956 pid = lwpid_of (get_thread_lwp (current_inferior));
3957 while (regset->size >= 0)
3958 {
3959 void *buf, *data;
3960 int nt_type, res;
3961
3962 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3963 {
3964 regset ++;
3965 continue;
3966 }
3967
3968 buf = xmalloc (regset->size);
3969
3970 /* First fill the buffer with the current register set contents,
3971 in case there are any items in the kernel's regset that are
3972 not in gdbserver's regcache. */
3973
3974 nt_type = regset->nt_type;
3975 if (nt_type)
3976 {
3977 iov.iov_base = buf;
3978 iov.iov_len = regset->size;
3979 data = (void *) &iov;
3980 }
3981 else
3982 data = buf;
3983
3984 #ifndef __sparc__
3985 res = ptrace (regset->get_request, pid, nt_type, data);
3986 #else
3987 	  res = ptrace (regset->get_request, pid, data, nt_type);
3988 #endif
3989
3990 if (res == 0)
3991 {
3992 /* Then overlay our cached registers on that. */
3993 regset->fill_function (regcache, buf);
3994
3995 /* Only now do we write the register set. */
3996 #ifndef __sparc__
3997 res = ptrace (regset->set_request, pid, nt_type, data);
3998 #else
3999 res = ptrace (regset->set_request, pid, data, nt_type);
4000 #endif
4001 }
4002
4003 if (res < 0)
4004 {
4005 if (errno == EIO)
4006 {
4007 /* If we get EIO on a regset, do not try it again for
4008 this process. */
4009 disabled_regsets[regset - target_regsets] = 1;
4010 free (buf);
4011 continue;
4012 }
4013 else if (errno == ESRCH)
4014 {
4015 /* At this point, ESRCH should mean the process is
4016 already gone, in which case we simply ignore attempts
4017 to change its registers. See also the related
4018 comment in linux_resume_one_lwp. */
4019 free (buf);
4020 return 0;
4021 }
4022 else
4023 {
4024 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4025 }
4026 }
4027 else if (regset->type == GENERAL_REGS)
4028 saw_general_regs = 1;
4029 regset ++;
4030 free (buf);
4031 }
4032 if (saw_general_regs)
4033 return 0;
4034 else
4035 return 1;
4037 }
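/* The store path above is deliberately read-modify-write: it fetches
   the kernel's current regset contents, overlays the cached register
   values, and only then writes the whole set back, so that any
   kernel-side fields gdbserver does not cache survive the round trip.
   A minimal sketch of the pattern, under the same PTRACE_GETREGSET
   assumptions as the sketch above (the helper name is hypothetical):  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int
store_regset_rmw (pid_t tid, int nt_type, void *buf, size_t size)
{
  struct iovec iov = { buf, size };

  /* Read the current contents first ...  */
  if (ptrace (PTRACE_GETREGSET, tid, nt_type, &iov) != 0)
    return -1;
  /* ... overlay the cached register values into BUF here ...  */
  /* ... then write the complete set back.  */
  return ptrace (PTRACE_SETREGSET, tid, nt_type, &iov) == 0 ? 0 : -1;
}
#endif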
4038
4039 #endif /* HAVE_LINUX_REGSETS */
4040
4041
4042 void
4043 linux_fetch_registers (struct regcache *regcache, int regno)
4044 {
4045 #ifdef HAVE_LINUX_REGSETS
4046 if (regsets_fetch_inferior_registers (regcache) == 0)
4047 return;
4048 #endif
4049 #ifdef HAVE_LINUX_USRREGS
4050 usr_fetch_inferior_registers (regcache, regno);
4051 #endif
4052 }
4053
4054 void
4055 linux_store_registers (struct regcache *regcache, int regno)
4056 {
4057 #ifdef HAVE_LINUX_REGSETS
4058 if (regsets_store_inferior_registers (regcache) == 0)
4059 return;
4060 #endif
4061 #ifdef HAVE_LINUX_USRREGS
4062 usr_store_inferior_registers (regcache, regno);
4063 #endif
4064 }
4065
4066
4067 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4068 to debugger memory starting at MYADDR. */
4069
4070 static int
4071 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4072 {
4073 register int i;
4074 /* Round starting address down to longword boundary. */
4075 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4076 /* Round ending address up; get number of longwords that makes. */
4077 register int count
4078 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4079 / sizeof (PTRACE_XFER_TYPE);
4080 /* Allocate buffer of that many longwords. */
4081 register PTRACE_XFER_TYPE *buffer
4082 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4083 int fd;
4084 char filename[64];
4085 int pid = lwpid_of (get_thread_lwp (current_inferior));
4086
4087 /* Try using /proc. Don't bother for one word. */
4088 if (len >= 3 * sizeof (long))
4089 {
4090 /* We could keep this file open and cache it - possibly one per
4091 thread. That requires some juggling, but is even faster. */
4092 sprintf (filename, "/proc/%d/mem", pid);
4093 fd = open (filename, O_RDONLY | O_LARGEFILE);
4094 if (fd == -1)
4095 goto no_proc;
4096
4097 /* If pread64 is available, use it. It's faster if the kernel
4098 supports it (only one syscall), and it's 64-bit safe even on
4099 32-bit platforms (for instance, SPARC debugging a SPARC64
4100 application). */
4101 #ifdef HAVE_PREAD64
4102 if (pread64 (fd, myaddr, len, memaddr) != len)
4103 #else
4104 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4105 #endif
4106 {
4107 close (fd);
4108 goto no_proc;
4109 }
4110
4111 close (fd);
4112 return 0;
4113 }
4114
4115 no_proc:
4116 /* Read all the longwords */
4117 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4118 {
4119 errno = 0;
4120 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4121 about coercing an 8 byte integer to a 4 byte pointer. */
4122 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4123 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4124 if (errno)
4125 return errno;
4126 }
4127
4128 /* Copy appropriate bytes out of the buffer. */
4129 memcpy (myaddr,
4130 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4131 len);
4132
4133 return 0;
4134 }
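/* Reading through /proc/PID/mem as above costs a single syscall for
   the whole block, instead of one PTRACE_PEEKTEXT round trip per
   word.  A minimal sketch of that fast path, with error handling
   reduced to the essentials (the helper name is hypothetical;
   building with _FILE_OFFSET_BITS=64 makes pread 64-bit safe where
   pread64 is unavailable):  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
proc_mem_read (int pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);  /* One syscall for LEN bytes.  */
  close (fd);
  return n == (ssize_t) len ? 0 : -1;
}
#endif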
4135
4136 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4137 memory at MEMADDR. On failure (cannot write to the inferior)
4138 returns the value of errno. */
4139
4140 static int
4141 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4142 {
4143 register int i;
4144 /* Round starting address down to longword boundary. */
4145 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4146 /* Round ending address up; get number of longwords that makes. */
4147 register int count
4148 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4149 / sizeof (PTRACE_XFER_TYPE);
4150
4151 /* Allocate buffer of that many longwords. */
4152 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4153 alloca (count * sizeof (PTRACE_XFER_TYPE));
4154
4155 int pid = lwpid_of (get_thread_lwp (current_inferior));
4156
4157 if (debug_threads)
4158 {
4159 /* Dump up to four bytes. */
4160 unsigned int val = * (unsigned int *) myaddr;
4161 if (len == 1)
4162 val = val & 0xff;
4163 else if (len == 2)
4164 val = val & 0xffff;
4165 else if (len == 3)
4166 val = val & 0xffffff;
4167 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4168 val, (long)memaddr);
4169 }
4170
4171 /* Fill start and end extra bytes of buffer with existing memory data. */
4172
4173 errno = 0;
4174 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4175 about coercing an 8 byte integer to a 4 byte pointer. */
4176 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4177 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4178 if (errno)
4179 return errno;
4180
4181 if (count > 1)
4182 {
4183 errno = 0;
4184 buffer[count - 1]
4185 = ptrace (PTRACE_PEEKTEXT, pid,
4186 /* Coerce to a uintptr_t first to avoid potential gcc warning
4187 about coercing an 8 byte integer to a 4 byte pointer. */
4188 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4189 * sizeof (PTRACE_XFER_TYPE)),
4190 0);
4191 if (errno)
4192 return errno;
4193 }
4194
4195 /* Copy data to be written over corresponding part of buffer. */
4196
4197 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4198 myaddr, len);
4199
4200 /* Write the entire buffer. */
4201
4202 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4203 {
4204 errno = 0;
4205 ptrace (PTRACE_POKETEXT, pid,
4206 /* Coerce to a uintptr_t first to avoid potential gcc warning
4207 about coercing an 8 byte integer to a 4 byte pointer. */
4208 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4209 (PTRACE_ARG4_TYPE) buffer[i]);
4210 if (errno)
4211 return errno;
4212 }
4213
4214 return 0;
4215 }
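/* Worked example of the boundary handling above, assuming an 8-byte
   PTRACE_XFER_TYPE: writing LEN = 7 bytes at MEMADDR = 0x1003 gives
   ADDR = 0x1000 and COUNT = 2, i.e. the words at 0x1000 and 0x1008.
   The bytes at 0x1000-0x1002 and 0x100a-0x100f lie outside the
   requested range, which is why both boundary words are peeked first
   and only then overwritten with the caller's data.  */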
4216
4217 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4218 static int linux_supports_tracefork_flag;
4219
4220 static void
4221 linux_enable_event_reporting (int pid)
4222 {
4223 if (!linux_supports_tracefork_flag)
4224 return;
4225
4226 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4227 }
4228
4229 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4230
4231 static int
4232 linux_tracefork_grandchild (void *arg)
4233 {
4234 _exit (0);
4235 }
4236
4237 #define STACK_SIZE 4096
4238
4239 static int
4240 linux_tracefork_child (void *arg)
4241 {
4242 ptrace (PTRACE_TRACEME, 0, 0, 0);
4243 kill (getpid (), SIGSTOP);
4244
4245 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4246
4247 if (fork () == 0)
4248 linux_tracefork_grandchild (NULL);
4249
4250 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4251
4252 #ifdef __ia64__
4253 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4254 CLONE_VM | SIGCHLD, NULL);
4255 #else
4256 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4257 CLONE_VM | SIGCHLD, NULL);
4258 #endif
4259
4260 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4261
4262 _exit (0);
4263 }
4264
4265 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4266 sure that we can enable the option, and that it had the desired
4267 effect. */
4268
4269 static void
4270 linux_test_for_tracefork (void)
4271 {
4272 int child_pid, ret, status;
4273 long second_pid;
4274 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4275 char *stack = xmalloc (STACK_SIZE * 4);
4276 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4277
4278 linux_supports_tracefork_flag = 0;
4279
4280 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4281
4282 child_pid = fork ();
4283 if (child_pid == 0)
4284 linux_tracefork_child (NULL);
4285
4286 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4287
4288 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4289 #ifdef __ia64__
4290 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4291 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4292 #else /* !__ia64__ */
4293 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4294 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4295 #endif /* !__ia64__ */
4296
4297 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4298
4299 if (child_pid == -1)
4300 perror_with_name ("clone");
4301
4302 ret = my_waitpid (child_pid, &status, 0);
4303 if (ret == -1)
4304 perror_with_name ("waitpid");
4305 else if (ret != child_pid)
4306 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4307 if (! WIFSTOPPED (status))
4308 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4309
4310 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4311 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4312 if (ret != 0)
4313 {
4314 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4315 if (ret != 0)
4316 {
4317 warning ("linux_test_for_tracefork: failed to kill child");
4318 return;
4319 }
4320
4321 ret = my_waitpid (child_pid, &status, 0);
4322 if (ret != child_pid)
4323 warning ("linux_test_for_tracefork: failed to wait for killed child");
4324 else if (!WIFSIGNALED (status))
4325 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4326 "killed child", status);
4327
4328 return;
4329 }
4330
4331 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4332 if (ret != 0)
4333 warning ("linux_test_for_tracefork: failed to resume child");
4334
4335 ret = my_waitpid (child_pid, &status, 0);
4336
4337 if (ret == child_pid && WIFSTOPPED (status)
4338 && status >> 16 == PTRACE_EVENT_FORK)
4339 {
4340 second_pid = 0;
4341 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4342 if (ret == 0 && second_pid != 0)
4343 {
4344 int second_status;
4345
4346 linux_supports_tracefork_flag = 1;
4347 my_waitpid (second_pid, &second_status, 0);
4348 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4349 if (ret != 0)
4350 warning ("linux_test_for_tracefork: failed to kill second child");
4351 my_waitpid (second_pid, &status, 0);
4352 }
4353 }
4354 else
4355 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4356 "(%d, status 0x%x)", ret, status);
4357
4358 do
4359 {
4360 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4361 if (ret != 0)
4362 warning ("linux_test_for_tracefork: failed to kill child");
4363 my_waitpid (child_pid, &status, 0);
4364 }
4365 while (WIFSTOPPED (status));
4366
4367 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4368 free (stack);
4369 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4370 }
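/* The probe above relies on the extended-event encoding of wait
   statuses: a traced fork reports as a SIGTRAP stop whose high bits
   carry PTRACE_EVENT_FORK, and PTRACE_GETEVENTMSG then yields the new
   child's PID.  A minimal sketch of decoding such a status word (the
   helper name is hypothetical):  */
#if 0
#include <signal.h>
#include <sys/wait.h>

static int
ptrace_event_of_status (int status)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    return status >> 16;   /* PTRACE_EVENT_FORK, _CLONE, ... or 0.  */
  return 0;
}
#endif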
4371
4372
4373 static void
4374 linux_look_up_symbols (void)
4375 {
4376 #ifdef USE_THREAD_DB
4377 struct process_info *proc = current_process ();
4378
4379 if (proc->private->thread_db != NULL)
4380 return;
4381
4382 /* If the kernel supports tracing forks then it also supports tracing
4383 clones, and then we don't need to use the magic thread event breakpoint
4384 to learn about threads. */
4385 thread_db_init (!linux_supports_tracefork_flag);
4386 #endif
4387 }
4388
4389 static void
4390 linux_request_interrupt (void)
4391 {
4392 extern unsigned long signal_pid;
4393
4394 if (!ptid_equal (cont_thread, null_ptid)
4395 && !ptid_equal (cont_thread, minus_one_ptid))
4396 {
4397 struct lwp_info *lwp;
4398 int lwpid;
4399
4400 lwp = get_thread_lwp (current_inferior);
4401 lwpid = lwpid_of (lwp);
4402 kill_lwp (lwpid, SIGINT);
4403 }
4404 else
4405 kill_lwp (signal_pid, SIGINT);
4406 }
4407
4408 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4409 to debugger memory starting at MYADDR. */
4410
4411 static int
4412 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4413 {
4414 char filename[PATH_MAX];
4415 int fd, n;
4416 int pid = lwpid_of (get_thread_lwp (current_inferior));
4417
4418 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4419
4420 fd = open (filename, O_RDONLY);
4421 if (fd < 0)
4422 return -1;
4423
4424 if (offset != (CORE_ADDR) 0
4425 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4426 n = -1;
4427 else
4428 n = read (fd, myaddr, len);
4429
4430 close (fd);
4431
4432 return n;
4433 }
4434
4435 /* These breakpoint and watchpoint related wrapper functions simply
4436 pass on the function call if the target has registered a
4437 corresponding function. */
4438
4439 static int
4440 linux_insert_point (char type, CORE_ADDR addr, int len)
4441 {
4442 if (the_low_target.insert_point != NULL)
4443 return the_low_target.insert_point (type, addr, len);
4444 else
4445 /* Unsupported (see target.h). */
4446 return 1;
4447 }
4448
4449 static int
4450 linux_remove_point (char type, CORE_ADDR addr, int len)
4451 {
4452 if (the_low_target.remove_point != NULL)
4453 return the_low_target.remove_point (type, addr, len);
4454 else
4455 /* Unsupported (see target.h). */
4456 return 1;
4457 }
4458
4459 static int
4460 linux_stopped_by_watchpoint (void)
4461 {
4462 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4463
4464 return lwp->stopped_by_watchpoint;
4465 }
4466
4467 static CORE_ADDR
4468 linux_stopped_data_address (void)
4469 {
4470 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4471
4472 return lwp->stopped_data_address;
4473 }
4474
4475 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4476 #if defined(__mcoldfire__)
4477 /* These should really be defined in the kernel's ptrace.h header. */
4478 #define PT_TEXT_ADDR 49*4
4479 #define PT_DATA_ADDR 50*4
4480 #define PT_TEXT_END_ADDR 51*4
4481 #elif defined(BFIN)
4482 #define PT_TEXT_ADDR 220
4483 #define PT_TEXT_END_ADDR 224
4484 #define PT_DATA_ADDR 228
4485 #elif defined(__TMS320C6X__)
4486 #define PT_TEXT_ADDR (0x10000*4)
4487 #define PT_DATA_ADDR (0x10004*4)
4488 #define PT_TEXT_END_ADDR (0x10008*4)
4489 #endif
4490
4491 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4492 to tell gdb about. */
4493
4494 static int
4495 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4496 {
4497 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4498 unsigned long text, text_end, data;
4499 int pid = lwpid_of (get_thread_lwp (current_inferior));
4500
4501 errno = 0;
4502
4503 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4504 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4505 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4506
4507 if (errno == 0)
4508 {
4509 /* Both text and data offsets produced at compile-time (and so
4510 used by gdb) are relative to the beginning of the program,
4511 with the data segment immediately following the text segment.
4512 However, the actual runtime layout in memory may put the data
4513 somewhere else, so when we send gdb a data base-address, we
4514 use the real data base address and subtract the compile-time
4515 data base-address from it (which is just the length of the
4516 text segment). BSS immediately follows data in both
4517 cases. */
4518 *text_p = text;
4519 *data_p = data - (text_end - text);
4520
4521 return 1;
4522 }
4523 #endif
4524 return 0;
4525 }
4526 #endif
4527
4528 static int
4529 linux_qxfer_osdata (const char *annex,
4530 unsigned char *readbuf, unsigned const char *writebuf,
4531 CORE_ADDR offset, int len)
4532 {
4533 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4534 }
4535
4536 /* Convert a native/host siginfo object, into/from the siginfo in the
4537 layout of the inferiors' architecture. */
4538
4539 static void
4540 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4541 {
4542 int done = 0;
4543
4544 if (the_low_target.siginfo_fixup != NULL)
4545 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4546
4547 /* If there was no callback, or the callback didn't do anything,
4548 then just do a straight memcpy. */
4549 if (!done)
4550 {
4551 if (direction == 1)
4552 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4553 else
4554 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4555 }
4556 }
4557
4558 static int
4559 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4560 unsigned const char *writebuf, CORE_ADDR offset, int len)
4561 {
4562 int pid;
4563 struct siginfo siginfo;
4564 char inf_siginfo[sizeof (struct siginfo)];
4565
4566 if (current_inferior == NULL)
4567 return -1;
4568
4569 pid = lwpid_of (get_thread_lwp (current_inferior));
4570
4571 if (debug_threads)
4572 fprintf (stderr, "%s siginfo for lwp %d.\n",
4573 readbuf != NULL ? "Reading" : "Writing",
4574 pid);
4575
4576 if (offset >= sizeof (siginfo))
4577 return -1;
4578
4579 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4580 return -1;
4581
4582 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4583 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4584 inferior with a 64-bit GDBSERVER should look the same as debugging it
4585 with a 32-bit GDBSERVER, we need to convert it. */
4586 siginfo_fixup (&siginfo, inf_siginfo, 0);
4587
4588 if (offset + len > sizeof (siginfo))
4589 len = sizeof (siginfo) - offset;
4590
4591 if (readbuf != NULL)
4592 memcpy (readbuf, inf_siginfo + offset, len);
4593 else
4594 {
4595 memcpy (inf_siginfo + offset, writebuf, len);
4596
4597 /* Convert back to ptrace layout before flushing it out. */
4598 siginfo_fixup (&siginfo, inf_siginfo, 1);
4599
4600 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4601 return -1;
4602 }
4603
4604 return len;
4605 }
4606
4607 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4608    it lets us notice when children change state; and it acts as the
4609    handler for the sigsuspend in my_waitpid.  */
4610
4611 static void
4612 sigchld_handler (int signo)
4613 {
4614 int old_errno = errno;
4615
4616 if (debug_threads)
4617 {
4618 do
4619 {
4620 /* fprintf is not async-signal-safe, so call write
4621 directly. */
4622 if (write (2, "sigchld_handler\n",
4623 sizeof ("sigchld_handler\n") - 1) < 0)
4624 break; /* just ignore */
4625 } while (0);
4626 }
4627
4628 if (target_is_async_p ())
4629 async_file_mark (); /* trigger a linux_wait */
4630
4631 errno = old_errno;
4632 }
4633
4634 static int
4635 linux_supports_non_stop (void)
4636 {
4637 return 1;
4638 }
4639
4640 static int
4641 linux_async (int enable)
4642 {
4643 int previous = (linux_event_pipe[0] != -1);
4644
4645 if (debug_threads)
4646 fprintf (stderr, "linux_async (%d), previous=%d\n",
4647 enable, previous);
4648
4649 if (previous != enable)
4650 {
4651 sigset_t mask;
4652 sigemptyset (&mask);
4653 sigaddset (&mask, SIGCHLD);
4654
4655 sigprocmask (SIG_BLOCK, &mask, NULL);
4656
4657 if (enable)
4658 {
4659 if (pipe (linux_event_pipe) == -1)
4660 fatal ("creating event pipe failed.");
4661
4662 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4663 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4664
4665 /* Register the event loop handler. */
4666 add_file_handler (linux_event_pipe[0],
4667 handle_target_event, NULL);
4668
4669 /* Always trigger a linux_wait. */
4670 async_file_mark ();
4671 }
4672 else
4673 {
4674 delete_file_handler (linux_event_pipe[0]);
4675
4676 close (linux_event_pipe[0]);
4677 close (linux_event_pipe[1]);
4678 linux_event_pipe[0] = -1;
4679 linux_event_pipe[1] = -1;
4680 }
4681
4682 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4683 }
4684
4685 return previous;
4686 }
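/* The event pipe managed above is the classic self-pipe trick: the
   SIGCHLD handler performs only an async-signal-safe write on the
   pipe's write end, and the event loop watches the read end, turning
   the signal into an ordinary file-descriptor event.  A minimal
   sketch with hypothetical names (the real registration goes through
   add_file_handler):  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int event_pipe[2];

/* Safe to call from a signal handler.  */
static void
mark_event (void)
{
  if (write (event_pipe[1], "+", 1) < 0)
    ;  /* Pipe already full: an event is pending anyway.  */
}

static int
setup_event_pipe (void)
{
  if (pipe (event_pipe) != 0)
    return -1;
  /* Non-blocking on both ends, so neither the handler nor the
     draining read can stall.  */
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif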
4687
4688 static int
4689 linux_start_non_stop (int nonstop)
4690 {
4691 /* Register or unregister from event-loop accordingly. */
4692 linux_async (nonstop);
4693 return 0;
4694 }
4695
4696 static int
4697 linux_supports_multi_process (void)
4698 {
4699 return 1;
4700 }
4701
4702 static int
4703 linux_supports_disable_randomization (void)
4704 {
4705 #ifdef HAVE_PERSONALITY
4706 return 1;
4707 #else
4708 return 0;
4709 #endif
4710 }
4711
4712 /* Enumerate spufs IDs for process PID. */
4713 static int
4714 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4715 {
4716 int pos = 0;
4717 int written = 0;
4718 char path[128];
4719 DIR *dir;
4720 struct dirent *entry;
4721
4722 sprintf (path, "/proc/%ld/fd", pid);
4723 dir = opendir (path);
4724 if (!dir)
4725 return -1;
4726
4727 rewinddir (dir);
4728 while ((entry = readdir (dir)) != NULL)
4729 {
4730 struct stat st;
4731 struct statfs stfs;
4732 int fd;
4733
4734 fd = atoi (entry->d_name);
4735 if (!fd)
4736 continue;
4737
4738 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4739 if (stat (path, &st) != 0)
4740 continue;
4741 if (!S_ISDIR (st.st_mode))
4742 continue;
4743
4744 if (statfs (path, &stfs) != 0)
4745 continue;
4746 if (stfs.f_type != SPUFS_MAGIC)
4747 continue;
4748
4749 if (pos >= offset && pos + 4 <= offset + len)
4750 {
4751 *(unsigned int *)(buf + pos - offset) = fd;
4752 written += 4;
4753 }
4754 pos += 4;
4755 }
4756
4757 closedir (dir);
4758 return written;
4759 }
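/* The loop above recognizes SPU context directories by comparing the
   filesystem magic of each /proc/PID/fd entry against SPUFS_MAGIC.
   The same statfs idiom works for any special filesystem; a minimal
   sketch (the helper name is hypothetical):  */
#if 0
#include <sys/vfs.h>

static int
path_is_on_spufs (const char *path)
{
  struct statfs stfs;

  if (statfs (path, &stfs) != 0)
    return 0;
  return stfs.f_type == SPUFS_MAGIC;
}
#endif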
4760
4761 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4762 object type, using the /proc file system. */
4763 static int
4764 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4765 unsigned const char *writebuf,
4766 CORE_ADDR offset, int len)
4767 {
4768 long pid = lwpid_of (get_thread_lwp (current_inferior));
4769 char buf[128];
4770 int fd = 0;
4771 int ret = 0;
4772
4773 if (!writebuf && !readbuf)
4774 return -1;
4775
4776 if (!*annex)
4777 {
4778 if (!readbuf)
4779 return -1;
4780 else
4781 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4782 }
4783
4784 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4785 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4786 if (fd <= 0)
4787 return -1;
4788
4789 if (offset != 0
4790 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4791 {
4792 close (fd);
4793 return 0;
4794 }
4795
4796 if (writebuf)
4797 ret = write (fd, writebuf, (size_t) len);
4798 else
4799 ret = read (fd, readbuf, (size_t) len);
4800
4801 close (fd);
4802 return ret;
4803 }
4804
4805 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4806 struct target_loadseg
4807 {
4808 /* Core address to which the segment is mapped. */
4809 Elf32_Addr addr;
4810 /* VMA recorded in the program header. */
4811 Elf32_Addr p_vaddr;
4812 /* Size of this segment in memory. */
4813 Elf32_Word p_memsz;
4814 };
4815
4816 # if defined PT_GETDSBT
4817 struct target_loadmap
4818 {
4819 /* Protocol version number, must be zero. */
4820 Elf32_Word version;
4821 /* Pointer to the DSBT table, its size, and the DSBT index. */
4822 unsigned *dsbt_table;
4823 unsigned dsbt_size, dsbt_index;
4824 /* Number of segments in this map. */
4825 Elf32_Word nsegs;
4826 /* The actual memory map. */
4827 struct target_loadseg segs[/*nsegs*/];
4828 };
4829 # define LINUX_LOADMAP PT_GETDSBT
4830 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4831 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4832 # else
4833 struct target_loadmap
4834 {
4835 /* Protocol version number, must be zero. */
4836 Elf32_Half version;
4837 /* Number of segments in this map. */
4838 Elf32_Half nsegs;
4839 /* The actual memory map. */
4840 struct target_loadseg segs[/*nsegs*/];
4841 };
4842 # define LINUX_LOADMAP PTRACE_GETFDPIC
4843 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4844 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4845 # endif
4846
4847 static int
4848 linux_read_loadmap (const char *annex, CORE_ADDR offset,
4849 unsigned char *myaddr, unsigned int len)
4850 {
4851 int pid = lwpid_of (get_thread_lwp (current_inferior));
4852 int addr = -1;
4853 struct target_loadmap *data = NULL;
4854 unsigned int actual_length, copy_length;
4855
4856 if (strcmp (annex, "exec") == 0)
4857 addr = (int) LINUX_LOADMAP_EXEC;
4858 else if (strcmp (annex, "interp") == 0)
4859 addr = (int) LINUX_LOADMAP_INTERP;
4860 else
4861 return -1;
4862
4863 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
4864 return -1;
4865
4866 if (data == NULL)
4867 return -1;
4868
4869 actual_length = sizeof (struct target_loadmap)
4870 + sizeof (struct target_loadseg) * data->nsegs;
4871
4872 if (offset < 0 || offset > actual_length)
4873 return -1;
4874
4875 copy_length = actual_length - offset < len ? actual_length - offset : len;
4876 memcpy (myaddr, (char *) data + offset, copy_length);
4877 return copy_length;
4878 }
4879 #else
4880 # define linux_read_loadmap NULL
4881 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
4882
4883 static void
4884 linux_process_qsupported (const char *query)
4885 {
4886 if (the_low_target.process_qsupported != NULL)
4887 the_low_target.process_qsupported (query);
4888 }
4889
4890 static int
4891 linux_supports_tracepoints (void)
4892 {
4893 if (*the_low_target.supports_tracepoints == NULL)
4894 return 0;
4895
4896 return (*the_low_target.supports_tracepoints) ();
4897 }
4898
4899 static CORE_ADDR
4900 linux_read_pc (struct regcache *regcache)
4901 {
4902 if (the_low_target.get_pc == NULL)
4903 return 0;
4904
4905 return (*the_low_target.get_pc) (regcache);
4906 }
4907
4908 static void
4909 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4910 {
4911 gdb_assert (the_low_target.set_pc != NULL);
4912
4913 (*the_low_target.set_pc) (regcache, pc);
4914 }
4915
4916 static int
4917 linux_thread_stopped (struct thread_info *thread)
4918 {
4919 return get_thread_lwp (thread)->stopped;
4920 }
4921
4922 /* This exposes stop-all-threads functionality to other modules. */
4923
4924 static void
4925 linux_pause_all (int freeze)
4926 {
4927 stop_all_lwps (freeze, NULL);
4928 }
4929
4930 /* This exposes unstop-all-threads functionality to other gdbserver
4931 modules. */
4932
4933 static void
4934 linux_unpause_all (int unfreeze)
4935 {
4936 unstop_all_lwps (unfreeze, NULL);
4937 }
4938
4939 static int
4940 linux_prepare_to_access_memory (void)
4941 {
4942 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4943 running LWP. */
4944 if (non_stop)
4945 linux_pause_all (1);
4946 return 0;
4947 }
4948
4949 static void
4950 linux_done_accessing_memory (void)
4951 {
4952 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4953 running LWP. */
4954 if (non_stop)
4955 linux_unpause_all (1);
4956 }
4957
4958 static int
4959 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
4960 CORE_ADDR collector,
4961 CORE_ADDR lockaddr,
4962 ULONGEST orig_size,
4963 CORE_ADDR *jump_entry,
4964 CORE_ADDR *trampoline,
4965 ULONGEST *trampoline_size,
4966 unsigned char *jjump_pad_insn,
4967 ULONGEST *jjump_pad_insn_size,
4968 CORE_ADDR *adjusted_insn_addr,
4969 CORE_ADDR *adjusted_insn_addr_end,
4970 char *err)
4971 {
4972 return (*the_low_target.install_fast_tracepoint_jump_pad)
4973 (tpoint, tpaddr, collector, lockaddr, orig_size,
4974 jump_entry, trampoline, trampoline_size,
4975 jjump_pad_insn, jjump_pad_insn_size,
4976 adjusted_insn_addr, adjusted_insn_addr_end,
4977 err);
4978 }
4979
4980 static struct emit_ops *
4981 linux_emit_ops (void)
4982 {
4983 if (the_low_target.emit_ops != NULL)
4984 return (*the_low_target.emit_ops) ();
4985 else
4986 return NULL;
4987 }
4988
4989 static int
4990 linux_get_min_fast_tracepoint_insn_len (void)
4991 {
4992 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
4993 }
4994
4995 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
4996
4997 static int
4998 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
4999 CORE_ADDR *phdr_memaddr, int *num_phdr)
5000 {
5001 char filename[PATH_MAX];
5002 int fd;
5003 const int auxv_size = is_elf64
5004 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5005 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5006
5007 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5008
5009 fd = open (filename, O_RDONLY);
5010 if (fd < 0)
5011 return 1;
5012
5013 *phdr_memaddr = 0;
5014 *num_phdr = 0;
5015 while (read (fd, buf, auxv_size) == auxv_size
5016 && (*phdr_memaddr == 0 || *num_phdr == 0))
5017 {
5018 if (is_elf64)
5019 {
5020 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5021
5022 switch (aux->a_type)
5023 {
5024 case AT_PHDR:
5025 *phdr_memaddr = aux->a_un.a_val;
5026 break;
5027 case AT_PHNUM:
5028 *num_phdr = aux->a_un.a_val;
5029 break;
5030 }
5031 }
5032 else
5033 {
5034 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5035
5036 switch (aux->a_type)
5037 {
5038 case AT_PHDR:
5039 *phdr_memaddr = aux->a_un.a_val;
5040 break;
5041 case AT_PHNUM:
5042 *num_phdr = aux->a_un.a_val;
5043 break;
5044 }
5045 }
5046 }
5047
5048 close (fd);
5049
5050 if (*phdr_memaddr == 0 || *num_phdr == 0)
5051 {
5052 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5053 "phdr_memaddr = %ld, phdr_num = %d",
5054 (long) *phdr_memaddr, *num_phdr);
5055 return 2;
5056 }
5057
5058 return 0;
5059 }
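/* The auxiliary vector scanned above is a flat array of
   (a_type, a_val) pairs terminated by an AT_NULL entry, so the reader
   only ever deals in fixed-size records.  A minimal sketch for the
   case where tracer and tracee share a word size, e.g.
   auxv_lookup (f, AT_PHDR); the helper name is hypothetical and ElfW
   comes from <link.h>:  */
#if 0
#include <elf.h>
#include <link.h>
#include <stdio.h>

static unsigned long
auxv_lookup (FILE *auxv, unsigned long type)  /* AUXV: /proc/PID/auxv.  */
{
  ElfW (auxv_t) entry;

  while (fread (&entry, sizeof entry, 1, auxv) == 1
         && entry.a_type != AT_NULL)
    if (entry.a_type == type)
      return entry.a_un.a_val;
  return 0;  /* Not found.  */
}
#endif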
5060
5061 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5062
5063 static CORE_ADDR
5064 get_dynamic (const int pid, const int is_elf64)
5065 {
5066 CORE_ADDR phdr_memaddr, relocation;
5067 int num_phdr, i;
5068 unsigned char *phdr_buf;
5069 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5070
5071 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5072 return 0;
5073
5074 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5075 phdr_buf = alloca (num_phdr * phdr_size);
5076
5077 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5078 return 0;
5079
5080 /* Compute relocation: it is expected to be 0 for "regular" executables,
5081 non-zero for PIE ones. */
5082 relocation = -1;
5083 for (i = 0; relocation == -1 && i < num_phdr; i++)
5084 if (is_elf64)
5085 {
5086 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5087
5088 if (p->p_type == PT_PHDR)
5089 relocation = phdr_memaddr - p->p_vaddr;
5090 }
5091 else
5092 {
5093 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5094
5095 if (p->p_type == PT_PHDR)
5096 relocation = phdr_memaddr - p->p_vaddr;
5097 }
5098
5099 if (relocation == -1)
5100 {
5101 warning ("Unexpected missing PT_PHDR");
5102 return 0;
5103 }
5104
5105 for (i = 0; i < num_phdr; i++)
5106 {
5107 if (is_elf64)
5108 {
5109 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5110
5111 if (p->p_type == PT_DYNAMIC)
5112 return p->p_vaddr + relocation;
5113 }
5114 else
5115 {
5116 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5117
5118 if (p->p_type == PT_DYNAMIC)
5119 return p->p_vaddr + relocation;
5120 }
5121 }
5122
5123 return 0;
5124 }
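/* Worked example of the relocation computed above: if a PIE
   executable's PT_PHDR records p_vaddr 0x40 while AT_PHDR reports the
   headers at 0x7f0000000040, the load bias is 0x7f0000000000, and
   every p_vaddr (including PT_DYNAMIC's) must be adjusted by that
   amount.  For a non-PIE executable the two values coincide and the
   bias is 0.  */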
5125
5126 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5127 can be 0 if the inferior does not yet have the library list initialized. */
5128
5129 static CORE_ADDR
5130 get_r_debug (const int pid, const int is_elf64)
5131 {
5132 CORE_ADDR dynamic_memaddr;
5133 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5134 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5135
5136 dynamic_memaddr = get_dynamic (pid, is_elf64);
5137 if (dynamic_memaddr == 0)
5138 return (CORE_ADDR) -1;
5139
5140 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5141 {
5142 if (is_elf64)
5143 {
5144 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5145
5146 if (dyn->d_tag == DT_DEBUG)
5147 return dyn->d_un.d_val;
5148
5149 if (dyn->d_tag == DT_NULL)
5150 break;
5151 }
5152 else
5153 {
5154 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5155
5156 if (dyn->d_tag == DT_DEBUG)
5157 return dyn->d_un.d_val;
5158
5159 if (dyn->d_tag == DT_NULL)
5160 break;
5161 }
5162
5163 dynamic_memaddr += dyn_size;
5164 }
5165
5166 return (CORE_ADDR) -1;
5167 }
5168
5169 /* Read one pointer from MEMADDR in the inferior. */
5170
5171 static int
5172 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5173 {
5174 *ptr = 0;
5175 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5176 }
5177
5178 struct link_map_offsets
5179 {
5180 /* Offset and size of r_debug.r_version. */
5181 int r_version_offset;
5182
5183 /* Offset and size of r_debug.r_map. */
5184 int r_map_offset;
5185
5186 /* Offset to l_addr field in struct link_map. */
5187 int l_addr_offset;
5188
5189 /* Offset to l_name field in struct link_map. */
5190 int l_name_offset;
5191
5192 /* Offset to l_ld field in struct link_map. */
5193 int l_ld_offset;
5194
5195 /* Offset to l_next field in struct link_map. */
5196 int l_next_offset;
5197
5198 /* Offset to l_prev field in struct link_map. */
5199 int l_prev_offset;
5200 };
5201
5202 /* Construct qXfer:libraries:read reply. */
5203
5204 static int
5205 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5206 unsigned const char *writebuf,
5207 CORE_ADDR offset, int len)
5208 {
5209 char *document;
5210 unsigned document_len;
5211 struct process_info_private *const priv = current_process ()->private;
5212 char filename[PATH_MAX];
5213 int pid, is_elf64;
5214
5215 static const struct link_map_offsets lmo_32bit_offsets =
5216 {
5217 0, /* r_version offset. */
5218 4, /* r_debug.r_map offset. */
5219 0, /* l_addr offset in link_map. */
5220 4, /* l_name offset in link_map. */
5221 8, /* l_ld offset in link_map. */
5222 12, /* l_next offset in link_map. */
5223 16 /* l_prev offset in link_map. */
5224 };
5225
5226 static const struct link_map_offsets lmo_64bit_offsets =
5227 {
5228 0, /* r_version offset. */
5229 8, /* r_debug.r_map offset. */
5230 0, /* l_addr offset in link_map. */
5231 8, /* l_name offset in link_map. */
5232 16, /* l_ld offset in link_map. */
5233 24, /* l_next offset in link_map. */
5234 32 /* l_prev offset in link_map. */
5235 };
5236 const struct link_map_offsets *lmo;
5237
5238 if (writebuf != NULL)
5239 return -2;
5240 if (readbuf == NULL)
5241 return -1;
5242
5243 pid = lwpid_of (get_thread_lwp (current_inferior));
5244 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5245 is_elf64 = elf_64_file_p (filename);
5246 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5247
5248 if (priv->r_debug == 0)
5249 priv->r_debug = get_r_debug (pid, is_elf64);
5250
5251 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5252 {
5253 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5254 }
5255 else
5256 {
5257 int allocated = 1024;
5258 char *p;
5259 const int ptr_size = is_elf64 ? 8 : 4;
5260 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5261 int r_version, header_done = 0;
5262
5263 document = xmalloc (allocated);
5264 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5265 p = document + strlen (document);
5266
5267 r_version = 0;
5268 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5269 (unsigned char *) &r_version,
5270 sizeof (r_version)) != 0
5271 || r_version != 1)
5272 {
5273 warning ("unexpected r_debug version %d", r_version);
5274 goto done;
5275 }
5276
5277 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5278 &lm_addr, ptr_size) != 0)
5279 {
5280 warning ("unable to read r_map from 0x%lx",
5281 (long) priv->r_debug + lmo->r_map_offset);
5282 goto done;
5283 }
5284
5285 lm_prev = 0;
5286 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5287 &l_name, ptr_size) == 0
5288 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5289 &l_addr, ptr_size) == 0
5290 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5291 &l_ld, ptr_size) == 0
5292 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5293 &l_prev, ptr_size) == 0
5294 && read_one_ptr (lm_addr + lmo->l_next_offset,
5295 &l_next, ptr_size) == 0)
5296 {
5297 unsigned char libname[PATH_MAX];
5298
5299 if (lm_prev != l_prev)
5300 {
5301 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5302 (long) lm_prev, (long) l_prev);
5303 break;
5304 }
5305
5306 /* Not checking for error because reading may stop before
5307 we've got PATH_MAX worth of characters. */
5308 libname[0] = '\0';
5309 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5310 libname[sizeof (libname) - 1] = '\0';
5311 if (libname[0] != '\0')
5312 {
5313 /* 6x the size for xml_escape_text below. */
5314 size_t len = 6 * strlen ((char *) libname);
5315 char *name;
5316
5317 if (!header_done)
5318 {
5319 /* Terminate `<library-list-svr4'. */
5320 *p++ = '>';
5321 header_done = 1;
5322 }
5323
5324 while (allocated < p - document + len + 200)
5325 {
5326 /* Expand to guarantee sufficient storage. */
5327 uintptr_t document_len = p - document;
5328
5329 document = xrealloc (document, 2 * allocated);
5330 allocated *= 2;
5331 p = document + document_len;
5332 }
5333
5334 name = xml_escape_text ((char *) libname);
5335 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5336 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5337 name, (unsigned long) lm_addr,
5338 (unsigned long) l_addr, (unsigned long) l_ld);
5339 free (name);
5340 }
5341 else if (lm_prev == 0)
5342 {
5343 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5344 p = p + strlen (p);
5345 }
5346
5347 if (l_next == 0)
5348 break;
5349
5350 lm_prev = lm_addr;
5351 lm_addr = l_next;
5352 }
5353 done:
5354 strcpy (p, "</library-list-svr4>");
5355 }
5356
5357 document_len = strlen (document);
5358 if (offset < document_len)
5359 document_len -= offset;
5360 else
5361 document_len = 0;
5362 if (len > document_len)
5363 len = document_len;
5364
5365 memcpy (readbuf, document + offset, len);
5366 xfree (document);
5367
5368 return len;
5369 }
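/* For reference, a reply assembled by the function above looks
   roughly like this (the addresses here are made up):

     <library-list-svr4 version="1.0" main-lm="0x7f0000002000">
       <library name="/lib64/libc.so.6" lm="0x7f0000a02000"
                l_addr="0x7f0000a00000" l_ld="0x7f0000bd1e80"/>
     </library-list-svr4>

   An empty list is returned as the self-closing form
   <library-list-svr4 version="1.0"/>.  */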
5370
5371 static struct target_ops linux_target_ops = {
5372 linux_create_inferior,
5373 linux_attach,
5374 linux_kill,
5375 linux_detach,
5376 linux_mourn,
5377 linux_join,
5378 linux_thread_alive,
5379 linux_resume,
5380 linux_wait,
5381 linux_fetch_registers,
5382 linux_store_registers,
5383 linux_prepare_to_access_memory,
5384 linux_done_accessing_memory,
5385 linux_read_memory,
5386 linux_write_memory,
5387 linux_look_up_symbols,
5388 linux_request_interrupt,
5389 linux_read_auxv,
5390 linux_insert_point,
5391 linux_remove_point,
5392 linux_stopped_by_watchpoint,
5393 linux_stopped_data_address,
5394 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5395 linux_read_offsets,
5396 #else
5397 NULL,
5398 #endif
5399 #ifdef USE_THREAD_DB
5400 thread_db_get_tls_address,
5401 #else
5402 NULL,
5403 #endif
5404 linux_qxfer_spu,
5405 hostio_last_error_from_errno,
5406 linux_qxfer_osdata,
5407 linux_xfer_siginfo,
5408 linux_supports_non_stop,
5409 linux_async,
5410 linux_start_non_stop,
5411 linux_supports_multi_process,
5412 #ifdef USE_THREAD_DB
5413 thread_db_handle_monitor_command,
5414 #else
5415 NULL,
5416 #endif
5417 linux_common_core_of_thread,
5418 linux_read_loadmap,
5419 linux_process_qsupported,
5420 linux_supports_tracepoints,
5421 linux_read_pc,
5422 linux_write_pc,
5423 linux_thread_stopped,
5424 NULL,
5425 linux_pause_all,
5426 linux_unpause_all,
5427 linux_cancel_breakpoints,
5428 linux_stabilize_threads,
5429 linux_install_fast_tracepoint_jump_pad,
5430 linux_emit_ops,
5431 linux_supports_disable_randomization,
5432 linux_get_min_fast_tracepoint_insn_len,
5433 linux_qxfer_libraries_svr4,
5434 };
5435
5436 static void
5437 linux_init_signals (void)
5438 {
5439 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5440 to find what the cancel signal actually is. */
5441 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5442 signal (__SIGRTMIN+1, SIG_IGN);
5443 #endif
5444 }
5445
5446 void
5447 initialize_low (void)
5448 {
5449 struct sigaction sigchld_action;
5450 memset (&sigchld_action, 0, sizeof (sigchld_action));
5451 set_target_ops (&linux_target_ops);
5452 set_breakpoint_data (the_low_target.breakpoint,
5453 the_low_target.breakpoint_len);
5454 linux_init_signals ();
5455 linux_test_for_tracefork ();
5456 #ifdef HAVE_LINUX_REGSETS
5457 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5458 ;
5459 disabled_regsets = xmalloc (num_regsets);
5460 #endif
5461
5462 sigchld_action.sa_handler = sigchld_handler;
5463 sigemptyset (&sigchld_action.sa_mask);
5464 sigchld_action.sa_flags = SA_RESTART;
5465 sigaction (SIGCHLD, &sigchld_action, NULL);
5466 }