/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
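
/* For example, with SIGSTOP == 19 (its usual value on Linux),
   W_STOPCODE (SIGSTOP) yields (19 << 8) | 0x7f == 0x137f, the wait
   status that WIFSTOPPED/WSTOPSIG decode as "stopped by SIGSTOP".  */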

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

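/* A minimal sketch of how such a pipe is typically driven (the classic
   self-pipe trick), assuming the async support code elsewhere does the
   equivalent of:

     write (linux_event_pipe[1], "+", 1);   // child event: wake the loop
     ...
     read (linux_event_pipe[0], &c, 1);     // event loop: consume wakeup

   This is only an illustration; the actual read/write sites are not in
   this section of the file.  */
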
static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd; the caller must free it.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
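
/* For reference, the e_ident bytes checked above look like this in a
   64-bit ELF file:

     0x7f 'E' 'L' 'F' 0x02 ...
     (ELFMAG0..ELFMAG3)  (EI_CLASS == ELFCLASS64)

   so elf_64_header_p really only needs the first EI_CLASS + 1 bytes,
   although elf_64_file_p reads the whole Elf64_Ehdr.  */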

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
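
/* A condensed sketch of the __WALL emulation above: with all signals
   blocked, poll both waitpid flavors with WNOHANG, sleeping in
   sigsuspend between rounds:

     ret = waitpid (pid, status, flags | WNOHANG);              // non-clone children
     if (ret <= 0)
       ret = waitpid (pid, status, flags | WNOHANG | __WCLONE); // clone children
     if (ret <= 0 && !wnohang)
       sigsuspend (&wake_mask);                                 // wait for SIGCHLD, retry

   The real loop alternates the flavors with `flags ^= __WCLONE' and
   also handles ECHILD, but the idea is the same.  */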

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
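
/* For reference: with PTRACE_O_TRACECLONE in effect, the kernel
   reports a clone by stopping the tracee with a raw wait status of

     (PTRACE_EVENT_CLONE << 16) | (SIGTRAP << 8) | 0x7f

   i.e. WIFSTOPPED is true, WSTOPSIG is SIGTRAP, and `wstat >> 16'
   (as used above) extracts the event code.  */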

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
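
/* For example, on x86 the breakpoint instruction (int3) is one byte
   long and decr_pc_after_break is 1: when a breakpoint planted at
   0x1000 is hit, the kernel reports a PC of 0x1001, and get_stop_pc
   adjusts it back to 0x1000, the address GDB knows the breakpoint
   by.  */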

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif
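
  /* Note: personality (0xffffffff) above is the documented way to
     query the current persona without changing it; 0xffffffff is not
     a valid persona, so the kernel just returns the current one.  */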

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            /* Errors ignored.  */;
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

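/* Return nonzero if THREAD is the only thread of its process, i.e. if
   no second thread with the same pid is found on the all_threads
   list.  */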
static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* Don't dereference LWP here; it is NULL.  */
      if (debug_threads)
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n", pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}
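
/* Block until process PID has been fully reaped, i.e. until waitpid
   either reports an exit or termination status, or fails with ECHILD
   because no child with that pid remains.  */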

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only look at STATUS if waitpid actually reaped a child;
       otherwise STATUS is uninitialized.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If the LWP was asked to stop (`vCont;t') and we have already
     reported the corresponding stop to GDB, don't report the pending
     status again.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

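/* Return the LWP whose lwp id matches PTID's lwp field, or, if PTID
   has no lwp component, PTID's pid field.  */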
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, the way to know which watched address trapped is to
     check the register that is used to select which address to watch.
     Problem is, between setting the watchpoint and reading back which
     data address trapped, the user may change the set of watchpoints,
     and, as a consequence, GDB changes the debug registers in the
     inferior.  To avoid reading back a stale stopped-data-address
     when that happens, we cache in CHILD the fact that a watchpoint
     trapped, and the corresponding data address, as soon as we see
     CHILD stop with a SIGTRAP.  If GDB changes the debug registers
     meanwhile, we have the cached data we can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}
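
/* Note on ordering: enqueue_one_deferred_signal pushes new entries at
   the head of the list (via the `prev' links), while
   dequeue_one_deferred_signal walks to the tail before popping, so
   deferred signals are reported back in the order they were
   originally received (FIFO).  */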

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;
  ptid_t wait_ptid;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (wait_ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      if (ptid_is_pid (ptid)
          && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
        {
          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
          continue;
        }

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

/* Count the LWP's that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
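
/* For example, with num_events == 3, the expression above maps
   rand () onto {0, 1, 2}: (3 * rand ()) / (RAND_MAX + 1.0) is always
   strictly less than 3, so the truncating cast to int can never yield
   num_events itself.  */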

/* Decrement the suspend count of an LWP.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended--;

  gdb_assert (lwp->suspended >= 0);
  return 0;
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if
   non-NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}

static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
                                       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
                            struct target_waitstatus *ourstatus,
                            int target_options);

1948 /* Stabilize threads (move out of jump pads).
1949
1950 If a thread is midway collecting a fast tracepoint, we need to
1951 finish the collection and move it out of the jump pad before
1952 reporting the signal.
1953
1954 This avoids recursion while collecting (when a signal arrives
1955 midway, and the signal handler itself collects), which would trash
1956 the trace buffer. In case the user set a breakpoint in a signal
1957 handler, this avoids the backtrace showing the jump pad, etc..
1958 Most importantly, there are certain things we can't do safely if
1959 threads are stopped in a jump pad (or in its callees).  For
1960 example:
1961
1962 - starting a new trace run.  A thread still collecting data from
1963 the previous run could trash the trace buffer when resumed.  The
1964 trace buffer control structures would have been reset but the thread
1965 had no way to tell.  The thread could even be midway through
1966 memcpy'ing to the buffer, which would mean that when resumed, it
1967 would clobber the trace buffer that had been set up for a new run.
1968
1969 - we can't rewrite/reuse the jump pads for new tracepoints
1970 safely.  Say you do tstart while a thread is stopped midway through
1971 collecting.  When the thread is later resumed, it finishes the
1972 collection, and returns to the jump pad, to execute the original
1973 instruction that was under the tracepoint jump at the time the
1974 older run had been started.  If the jump pad had since been
1975 rewritten for something else in the new run, the thread would now
1976 execute the wrong (effectively random) instructions.  */
1977
1978 static void
1979 linux_stabilize_threads (void)
1980 {
1981 struct thread_info *save_inferior;
1982 struct lwp_info *lwp_stuck;
1983
1984 lwp_stuck
1985 = (struct lwp_info *) find_inferior (&all_lwps,
1986 stuck_in_jump_pad_callback, NULL);
1987 if (lwp_stuck != NULL)
1988 {
1989 if (debug_threads)
1990 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
1991 lwpid_of (lwp_stuck));
1992 return;
1993 }
1994
1995 save_inferior = current_inferior;
1996
1997 stabilizing_threads = 1;
1998
1999 /* Kick 'em all. */
2000 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2001
2002 /* Loop until all are stopped out of the jump pads. */
2003 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2004 {
2005 struct target_waitstatus ourstatus;
2006 struct lwp_info *lwp;
2007 int wstat;
2008
2009 /* Note that we go through the full wait event loop.  While
2010 moving threads out of jump pad, we need to be able to step
2011 over internal breakpoints and such. */
2012 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2013
2014 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2015 {
2016 lwp = get_thread_lwp (current_inferior);
2017
2018 /* Lock it. */
2019 lwp->suspended++;
2020
2021 if (ourstatus.value.sig != TARGET_SIGNAL_0
2022 || current_inferior->last_resume_kind == resume_stop)
2023 {
2024 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2025 enqueue_one_deferred_signal (lwp, &wstat);
2026 }
2027 }
2028 }
2029
2030 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2031
2032 stabilizing_threads = 0;
2033
2034 current_inferior = save_inferior;
2035
2036 if (debug_threads)
2037 {
2038 lwp_stuck
2039 = (struct lwp_info *) find_inferior (&all_lwps,
2040 stuck_in_jump_pad_callback, NULL);
2041 if (lwp_stuck != NULL)
2042 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2043 lwpid_of (lwp_stuck));
2044 }
2045 }
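
/* Editor's illustrative sketch, not part of gdbserver: the loop above
   parks signals with enqueue_one_deferred_signal using a synthetic
   wait status built by W_STOPCODE.  The hypothetical check below
   shows that such a status round-trips through the standard decoding
   macros, which is what allows a deferred signal to be replayed later
   as if waitpid had just reported it.  */

static int
example_wstat_roundtrip (int sig)
{
  int wstat = W_STOPCODE (sig);

  /* W_STOPCODE (sig) is ((sig) << 8 | 0x7f); WIFSTOPPED recognizes
     the 0x7f marker and WSTOPSIG recovers SIG.  */
  return WIFSTOPPED (wstat) && WSTOPSIG (wstat) == sig;
}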
2046
2047 /* Wait for the process and return its status.  */
2048
2049 static ptid_t
2050 linux_wait_1 (ptid_t ptid,
2051 struct target_waitstatus *ourstatus, int target_options)
2052 {
2053 int w;
2054 struct lwp_info *event_child;
2055 int options;
2056 int pid;
2057 int step_over_finished;
2058 int bp_explains_trap;
2059 int maybe_internal_trap;
2060 int report_to_gdb;
2061 int trace_event;
2062
2063 /* Translate generic target options into linux options. */
2064 options = __WALL;
2065 if (target_options & TARGET_WNOHANG)
2066 options |= WNOHANG;
2067
2068 retry:
2069 bp_explains_trap = 0;
2070 trace_event = 0;
2071 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2072
2073 /* If we were only supposed to resume one thread, only wait for
2074 that thread - if it's still alive. If it died, however - which
2075 can happen if we're coming from the thread death case below -
2076 then we need to make sure we restart the other threads. We could
2077 pick a thread at random or restart all; restarting all is less
2078 arbitrary. */
2079 if (!non_stop
2080 && !ptid_equal (cont_thread, null_ptid)
2081 && !ptid_equal (cont_thread, minus_one_ptid))
2082 {
2083 struct thread_info *thread;
2084
2085 thread = (struct thread_info *) find_inferior_id (&all_threads,
2086 cont_thread);
2087
2088 /* No stepping, no signal - unless one is pending already, of course. */
2089 if (thread == NULL)
2090 {
2091 struct thread_resume resume_info;
2092 resume_info.thread = minus_one_ptid;
2093 resume_info.kind = resume_continue;
2094 resume_info.sig = 0;
2095 linux_resume (&resume_info, 1);
2096 }
2097 else
2098 ptid = cont_thread;
2099 }
2100
2101 if (ptid_equal (step_over_bkpt, null_ptid))
2102 pid = linux_wait_for_event (ptid, &w, options);
2103 else
2104 {
2105 if (debug_threads)
2106 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2107 target_pid_to_str (step_over_bkpt));
2108 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2109 }
2110
2111 if (pid == 0) /* only if TARGET_WNOHANG */
2112 return null_ptid;
2113
2114 event_child = get_thread_lwp (current_inferior);
2115
2116 /* If we are waiting for a particular child, and it exited,
2117 linux_wait_for_event will return its exit status. Similarly if
2118 the last child exited. If this is not the last child, however,
2119 do not report it as exited until there is a 'thread exited' response
2120 available in the remote protocol. Instead, just wait for another event.
2121 This should be safe, because if the thread crashed we will already
2122 have reported the termination signal to GDB; that should stop any
2123 in-progress stepping operations, etc.
2124
2125 Report the exit status of the last thread to exit. This matches
2126 LinuxThreads' behavior. */
2127
2128 if (last_thread_of_process_p (current_inferior))
2129 {
2130 if (WIFEXITED (w) || WIFSIGNALED (w))
2131 {
2132 if (WIFEXITED (w))
2133 {
2134 ourstatus->kind = TARGET_WAITKIND_EXITED;
2135 ourstatus->value.integer = WEXITSTATUS (w);
2136
2137 if (debug_threads)
2138 fprintf (stderr,
2139 "\nChild exited with retcode = %x \n",
2140 WEXITSTATUS (w));
2141 }
2142 else
2143 {
2144 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2145 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2146
2147 if (debug_threads)
2148 fprintf (stderr,
2149 "\nChild terminated with signal = %x \n",
2150 WTERMSIG (w));
2151
2152 }
2153
2154 return ptid_of (event_child);
2155 }
2156 }
2157 else
2158 {
2159 if (!WIFSTOPPED (w))
2160 goto retry;
2161 }
2162
2163 /* If this event was not handled before, and is not a SIGTRAP, we
2164 report it. SIGILL and SIGSEGV are also treated as traps in case
2165 a breakpoint is inserted at the current PC. If this target does
2166 not support internal breakpoints at all, we also report the
2167 SIGTRAP without further processing; it's of no concern to us. */
2168 maybe_internal_trap
2169 = (supports_breakpoints ()
2170 && (WSTOPSIG (w) == SIGTRAP
2171 || ((WSTOPSIG (w) == SIGILL
2172 || WSTOPSIG (w) == SIGSEGV)
2173 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2174
2175 if (maybe_internal_trap)
2176 {
2177 /* Handle anything that requires bookkeeping before deciding to
2178 report the event or continue waiting. */
2179
2180 /* First check if we can explain the SIGTRAP with an internal
2181 breakpoint, or if we should possibly report the event to GDB.
2182 Do this before anything that may remove or insert a
2183 breakpoint. */
2184 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2185
2186 /* We have a SIGTRAP, possibly a step-over dance has just
2187 finished. If so, tweak the state machine accordingly,
2188 reinsert breakpoints and delete any reinsert (software
2189 single-step) breakpoints. */
2190 step_over_finished = finish_step_over (event_child);
2191
2192 /* Now invoke the callbacks of any internal breakpoints there. */
2193 check_breakpoints (event_child->stop_pc);
2194
2195 /* Handle tracepoint data collecting. This may overflow the
2196 trace buffer, and cause a tracing stop, removing
2197 breakpoints. */
2198 trace_event = handle_tracepoints (event_child);
2199
2200 if (bp_explains_trap)
2201 {
2202 /* If we stepped or ran into an internal breakpoint, we've
2203 already handled it. So next time we resume (from this
2204 PC), we should step over it. */
2205 if (debug_threads)
2206 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2207
2208 if (breakpoint_here (event_child->stop_pc))
2209 event_child->need_step_over = 1;
2210 }
2211 }
2212 else
2213 {
2214 /* We have some other signal, possibly a step-over dance was in
2215 progress, and it should be cancelled too. */
2216 step_over_finished = finish_step_over (event_child);
2217 }
2218
2219 /* We have all the data we need. Either report the event to GDB, or
2220 resume threads and keep waiting for more. */
2221
2222 /* If we're collecting a fast tracepoint, finish the collection and
2223 move out of the jump pad before delivering a signal. See
2224 linux_stabilize_threads. */
2225
2226 if (WIFSTOPPED (w)
2227 && WSTOPSIG (w) != SIGTRAP
2228 && supports_fast_tracepoints ()
2229 && in_process_agent_loaded ())
2230 {
2231 if (debug_threads)
2232 fprintf (stderr,
2233 "Got signal %d for LWP %ld. Check if we need "
2234 "to defer or adjust it.\n",
2235 WSTOPSIG (w), lwpid_of (event_child));
2236
2237 /* Allow debugging the jump pad itself. */
2238 if (current_inferior->last_resume_kind != resume_step
2239 && maybe_move_out_of_jump_pad (event_child, &w))
2240 {
2241 enqueue_one_deferred_signal (event_child, &w);
2242
2243 if (debug_threads)
2244 fprintf (stderr,
2245 "Signal %d for LWP %ld deferred (in jump pad)\n",
2246 WSTOPSIG (w), lwpid_of (event_child));
2247
2248 linux_resume_one_lwp (event_child, 0, 0, NULL);
2249 goto retry;
2250 }
2251 }
2252
2253 if (event_child->collecting_fast_tracepoint)
2254 {
2255 if (debug_threads)
2256 fprintf (stderr, "\
2257 LWP %ld was trying to move out of the jump pad (%d). \
2258 Check if we're already there.\n",
2259 lwpid_of (event_child),
2260 event_child->collecting_fast_tracepoint);
2261
2262 trace_event = 1;
2263
2264 event_child->collecting_fast_tracepoint
2265 = linux_fast_tracepoint_collecting (event_child, NULL);
2266
2267 if (event_child->collecting_fast_tracepoint != 1)
2268 {
2269 /* No longer need this breakpoint. */
2270 if (event_child->exit_jump_pad_bkpt != NULL)
2271 {
2272 if (debug_threads)
2273 fprintf (stderr,
2274 "No longer need exit-jump-pad bkpt; removing it."
2275 "stopping all threads momentarily.\n");
2276
2277 /* Other running threads could hit this breakpoint.
2278 We don't handle moribund locations like GDB does,
2279 instead we always pause all threads when removing
2280 breakpoints, so that any step-over or
2281 decr_pc_after_break adjustment is always taken
2282 care of while the breakpoint is still
2283 inserted. */
2284 stop_all_lwps (1, event_child);
2285 cancel_breakpoints ();
2286
2287 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2288 event_child->exit_jump_pad_bkpt = NULL;
2289
2290 unstop_all_lwps (1, event_child);
2291
2292 gdb_assert (event_child->suspended >= 0);
2293 }
2294 }
2295
2296 if (event_child->collecting_fast_tracepoint == 0)
2297 {
2298 if (debug_threads)
2299 fprintf (stderr,
2300 "fast tracepoint finished "
2301 "collecting successfully.\n");
2302
2303 /* We may have a deferred signal to report. */
2304 if (dequeue_one_deferred_signal (event_child, &w))
2305 {
2306 if (debug_threads)
2307 fprintf (stderr, "dequeued one signal.\n");
2308 }
2309 else
2310 {
2311 if (debug_threads)
2312 fprintf (stderr, "no deferred signals.\n");
2313
2314 if (stabilizing_threads)
2315 {
2316 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2317 ourstatus->value.sig = TARGET_SIGNAL_0;
2318 return ptid_of (event_child);
2319 }
2320 }
2321 }
2322 }
2323
2324 /* Check whether GDB would be interested in this event. */
2325
2326 /* If GDB is not interested in this signal, don't stop other
2327 threads, and don't report it to GDB. Just resume the inferior
2328 right away. We do this for threading-related signals as well as
2329 any that GDB specifically requested we ignore. But never ignore
2330 SIGSTOP if we sent it ourselves, and do not ignore signals when
2331 stepping - they may require special handling to skip the signal
2332 handler. */
2333 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2334 thread library? */
2335 if (WIFSTOPPED (w)
2336 && current_inferior->last_resume_kind != resume_step
2337 && (
2338 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2339 (current_process ()->private->thread_db != NULL
2340 && (WSTOPSIG (w) == __SIGRTMIN
2341 || WSTOPSIG (w) == __SIGRTMIN + 1))
2342 ||
2343 #endif
2344 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2345 && !(WSTOPSIG (w) == SIGSTOP
2346 && current_inferior->last_resume_kind == resume_stop))))
2347 {
2348 siginfo_t info, *info_p;
2349
2350 if (debug_threads)
2351 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2352 WSTOPSIG (w), lwpid_of (event_child));
2353
2354 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2355 info_p = &info;
2356 else
2357 info_p = NULL;
2358 linux_resume_one_lwp (event_child, event_child->stepping,
2359 WSTOPSIG (w), info_p);
2360 goto retry;
2361 }
2362
2363 /* If GDB wanted this thread to single step, we always want to
2364 report the SIGTRAP, and let GDB handle it. Watchpoints should
2365 always be reported. So should signals we can't explain. A
2366 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2367 may not support Z0 breakpoints.  If we do, we'll be able to handle
2368 GDB breakpoints on top of internal breakpoints, by handling the
2369 internal breakpoint and still reporting the event to GDB.  If we
2370 don't, we're out of luck; GDB won't see the breakpoint hit.  */
2371 report_to_gdb = (!maybe_internal_trap
2372 || current_inferior->last_resume_kind == resume_step
2373 || event_child->stopped_by_watchpoint
2374 || (!step_over_finished
2375 && !bp_explains_trap && !trace_event)
2376 || gdb_breakpoint_here (event_child->stop_pc));
2377
2378 /* We found no reason GDB would want us to stop. We either hit one
2379 of our own breakpoints, or finished an internal step GDB
2380 shouldn't know about. */
2381 if (!report_to_gdb)
2382 {
2383 if (debug_threads)
2384 {
2385 if (bp_explains_trap)
2386 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2387 if (step_over_finished)
2388 fprintf (stderr, "Step-over finished.\n");
2389 if (trace_event)
2390 fprintf (stderr, "Tracepoint event.\n");
2391 }
2392
2393 /* We're not reporting this breakpoint to GDB, so apply the
2394 decr_pc_after_break adjustment to the inferior's regcache
2395 ourselves. */
2396
2397 if (the_low_target.set_pc != NULL)
2398 {
2399 struct regcache *regcache
2400 = get_thread_regcache (get_lwp_thread (event_child), 1);
2401 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2402 }
2403
2404 /* We may have finished stepping over a breakpoint. If so,
2405 we've stopped and suspended all LWPs momentarily except the
2406 stepping one. This is where we resume them all again. We're
2407 going to keep waiting, so use proceed, which handles stepping
2408 over the next breakpoint. */
2409 if (debug_threads)
2410 fprintf (stderr, "proceeding all threads.\n");
2411
2412 if (step_over_finished)
2413 unsuspend_all_lwps (event_child);
2414
2415 proceed_all_lwps ();
2416 goto retry;
2417 }
2418
2419 if (debug_threads)
2420 {
2421 if (current_inferior->last_resume_kind == resume_step)
2422 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2423 if (event_child->stopped_by_watchpoint)
2424 fprintf (stderr, "Stopped by watchpoint.\n");
2425 if (gdb_breakpoint_here (event_child->stop_pc))
2426 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2427 fprintf (stderr,
2428 "Hit a non-gdbserver trap event.\n");
2429 }
2430
2431 /* Alright, we're going to report a stop. */
2432
2433 if (!non_stop && !stabilizing_threads)
2434 {
2435 /* In all-stop, stop all threads. */
2436 stop_all_lwps (0, NULL);
2437
2438 /* If we're not waiting for a specific LWP, choose an event LWP
2439 from among those that have had events. Giving equal priority
2440 to all LWPs that have had events helps prevent
2441 starvation. */
2442 if (ptid_equal (ptid, minus_one_ptid))
2443 {
2444 event_child->status_pending_p = 1;
2445 event_child->status_pending = w;
2446
2447 select_event_lwp (&event_child);
2448
2449 event_child->status_pending_p = 0;
2450 w = event_child->status_pending;
2451 }
2452
2453 /* Now that we've selected our final event LWP, cancel any
2454 breakpoints in other LWPs that have hit a GDB breakpoint.
2455 See the comment in cancel_breakpoints_callback to find out
2456 why. */
2457 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2458
2459 /* Stabilize threads (move out of jump pads). */
2460 stabilize_threads ();
2461 }
2462 else
2463 {
2464 /* If we just finished a step-over, then all threads had been
2465 momentarily paused. In all-stop, that's fine, we want
2466 threads stopped by now anyway. In non-stop, we need to
2467 re-resume threads that GDB wanted to be running. */
2468 if (step_over_finished)
2469 unstop_all_lwps (1, event_child);
2470 }
2471
2472 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2473
2474 if (current_inferior->last_resume_kind == resume_stop
2475 && WSTOPSIG (w) == SIGSTOP)
2476 {
2477 /* A thread that was requested to stop by GDB with vCont;t
2478 stopped cleanly; report it to GDB as SIG0.  The use of
2479 SIGSTOP is an implementation detail.  */
2480 ourstatus->value.sig = TARGET_SIGNAL_0;
2481 }
2482 else if (current_inferior->last_resume_kind == resume_stop
2483 && WSTOPSIG (w) != SIGSTOP)
2484 {
2485 /* A thread that was requested to stop by GDB with vCont;t,
2486 but stopped for some other reason.  */
2487 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2488 }
2489 else
2490 {
2491 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2492 }
2493
2494 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2495
2496 if (debug_threads)
2497 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2498 target_pid_to_str (ptid_of (event_child)),
2499 ourstatus->kind,
2500 ourstatus->value.sig);
2501
2502 return ptid_of (event_child);
2503 }
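
/* Editor's illustrative sketch, not part of gdbserver: linux_wait_1
   above distinguishes three kinds of raw wait status.  A hypothetical
   decoder, using only the <sys/wait.h> macros the function itself
   relies on:  */

static const char *
example_classify_wstat (int w)
{
  if (WIFEXITED (w))
    return "exited";		/* Exit code in WEXITSTATUS (w).  */
  else if (WIFSIGNALED (w))
    return "killed by signal";	/* Signal in WTERMSIG (w).  */
  else if (WIFSTOPPED (w))
    return "stopped by signal";	/* Signal in WSTOPSIG (w).  */
  else
    return "unknown";
}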
2504
2505 /* Get rid of any pending event in the pipe. */
2506 static void
2507 async_file_flush (void)
2508 {
2509 int ret;
2510 char buf;
2511
2512 do
2513 ret = read (linux_event_pipe[0], &buf, 1);
2514 while (ret >= 0 || (ret == -1 && errno == EINTR));
2515 }
2516
2517 /* Put something in the pipe, so the event loop wakes up. */
2518 static void
2519 async_file_mark (void)
2520 {
2521 int ret;
2522
2523 async_file_flush ();
2524
2525 do
2526 ret = write (linux_event_pipe[1], "+", 1);
2527 while (ret == 0 || (ret == -1 && errno == EINTR));
2528
2529 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2530 be awakened anyway. */
2531 }
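
/* Editor's illustrative sketch, not part of gdbserver: the two
   functions above implement the classic self-pipe trick.  For the
   flush and mark loops to terminate, both pipe ends must be
   nonblocking.  A hypothetical setup routine (the real
   linux_event_pipe is created elsewhere in this file):  */

static int
example_make_event_pipe (int fds[2])
{
  int i;

  if (pipe (fds) != 0)
    return -1;

  for (i = 0; i < 2; i++)
    {
      int flags = fcntl (fds[i], F_GETFL);

      if (flags == -1
	  || fcntl (fds[i], F_SETFL, flags | O_NONBLOCK) == -1)
	return -1;
    }

  return 0;
}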
2532
2533 static ptid_t
2534 linux_wait (ptid_t ptid,
2535 struct target_waitstatus *ourstatus, int target_options)
2536 {
2537 ptid_t event_ptid;
2538
2539 if (debug_threads)
2540 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2541
2542 /* Flush the async file first. */
2543 if (target_is_async_p ())
2544 async_file_flush ();
2545
2546 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2547
2548 /* If at least one stop was reported, there may be more. A single
2549 SIGCHLD can signal more than one child stop. */
2550 if (target_is_async_p ()
2551 && (target_options & TARGET_WNOHANG) != 0
2552 && !ptid_equal (event_ptid, null_ptid))
2553 async_file_mark ();
2554
2555 return event_ptid;
2556 }
2557
2558 /* Send a signal to an LWP. */
2559
2560 static int
2561 kill_lwp (unsigned long lwpid, int signo)
2562 {
2563 /* Use tkill, if possible, so the signal is directed at exactly one
2564 thread.  If tkill is unavailable (ENOSYS), fall back to kill.  */
2565
2566 #ifdef __NR_tkill
2567 {
2568 static int tkill_failed;
2569
2570 if (!tkill_failed)
2571 {
2572 int ret;
2573
2574 errno = 0;
2575 ret = syscall (__NR_tkill, lwpid, signo);
2576 if (errno != ENOSYS)
2577 return ret;
2578 tkill_failed = 1;
2579 }
2580 }
2581 #endif
2582
2583 return kill (lwpid, signo);
2584 }
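
/* Editor's note: tkill directs a signal at one specific kernel task
   (LWP), whereas kill has process-wide delivery semantics.  Stopping
   threads one at a time is exactly what the stop machinery below
   needs, hence send_sigstop's kill_lwp (lwpid_of (lwp), SIGSTOP); the
   plain-kill fallback only matters on old kernels predating tkill,
   where each LinuxThreads thread had its own process ID anyway.  */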
2585
2586 void
2587 linux_stop_lwp (struct lwp_info *lwp)
2588 {
2589 send_sigstop (lwp);
2590 }
2591
2592 static void
2593 send_sigstop (struct lwp_info *lwp)
2594 {
2595 int pid;
2596
2597 pid = lwpid_of (lwp);
2598
2599 /* If we already have a pending stop signal for this process, don't
2600 send another. */
2601 if (lwp->stop_expected)
2602 {
2603 if (debug_threads)
2604 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2605
2606 return;
2607 }
2608
2609 if (debug_threads)
2610 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2611
2612 lwp->stop_expected = 1;
2613 kill_lwp (pid, SIGSTOP);
2614 }
2615
2616 static int
2617 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2618 {
2619 struct lwp_info *lwp = (struct lwp_info *) entry;
2620
2621 /* Ignore EXCEPT. */
2622 if (lwp == except)
2623 return 0;
2624
2625 if (lwp->stopped)
2626 return 0;
2627
2628 send_sigstop (lwp);
2629 return 0;
2630 }
2631
2632 /* Increment the suspend count of an LWP, and stop it, if not stopped
2633 yet. */
2634 static int
2635 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2636 void *except)
2637 {
2638 struct lwp_info *lwp = (struct lwp_info *) entry;
2639
2640 /* Ignore EXCEPT. */
2641 if (lwp == except)
2642 return 0;
2643
2644 lwp->suspended++;
2645
2646 return send_sigstop_callback (entry, except);
2647 }
2648
2649 static void
2650 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2651 {
2652 /* It's dead, really. */
2653 lwp->dead = 1;
2654
2655 /* Store the exit status for later. */
2656 lwp->status_pending_p = 1;
2657 lwp->status_pending = wstat;
2658
2659 /* Prevent trying to stop it. */
2660 lwp->stopped = 1;
2661
2662 /* No further stops are expected from a dead lwp. */
2663 lwp->stop_expected = 0;
2664 }
2665
2666 static void
2667 wait_for_sigstop (struct inferior_list_entry *entry)
2668 {
2669 struct lwp_info *lwp = (struct lwp_info *) entry;
2670 struct thread_info *saved_inferior;
2671 int wstat;
2672 ptid_t saved_tid;
2673 ptid_t ptid;
2674 int pid;
2675
2676 if (lwp->stopped)
2677 {
2678 if (debug_threads)
2679 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2680 lwpid_of (lwp));
2681 return;
2682 }
2683
2684 saved_inferior = current_inferior;
2685 if (saved_inferior != NULL)
2686 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2687 else
2688 saved_tid = null_ptid; /* avoid bogus unused warning */
2689
2690 ptid = lwp->head.id;
2691
2692 if (debug_threads)
2693 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2694
2695 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2696
2697 /* If we stopped with a non-SIGSTOP signal, save it for later
2698 and record the pending SIGSTOP. If the process exited, just
2699 return. */
2700 if (WIFSTOPPED (wstat))
2701 {
2702 if (debug_threads)
2703 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2704 lwpid_of (lwp), WSTOPSIG (wstat));
2705
2706 if (WSTOPSIG (wstat) != SIGSTOP)
2707 {
2708 if (debug_threads)
2709 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2710 lwpid_of (lwp), wstat);
2711
2712 lwp->status_pending_p = 1;
2713 lwp->status_pending = wstat;
2714 }
2715 }
2716 else
2717 {
2718 if (debug_threads)
2719 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2720
2721 lwp = find_lwp_pid (pid_to_ptid (pid));
2722 if (lwp)
2723 {
2724 /* Leave this status pending for the next time we're able to
2725 report it.  In the meantime, we'll report this lwp as
2726 dead to GDB, so GDB doesn't try to read registers and
2727 memory from it. This can only happen if this was the
2728 last thread of the process; otherwise, PID is removed
2729 from the thread tables before linux_wait_for_event
2730 returns. */
2731 mark_lwp_dead (lwp, wstat);
2732 }
2733 }
2734
2735 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2736 current_inferior = saved_inferior;
2737 else
2738 {
2739 if (debug_threads)
2740 fprintf (stderr, "Previously current thread died.\n");
2741
2742 if (non_stop)
2743 {
2744 /* We can't change the current inferior behind GDB's back,
2745 otherwise, a subsequent command may apply to the wrong
2746 process. */
2747 current_inferior = NULL;
2748 }
2749 else
2750 {
2751 /* Set a valid thread as current. */
2752 set_desired_inferior (0);
2753 }
2754 }
2755 }
2756
2757 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2758 move it out, because we need to report the stop event to GDB. For
2759 example, if the user puts a breakpoint in the jump pad, it's
2760 because she wants to debug it. */
2761
2762 static int
2763 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2764 {
2765 struct lwp_info *lwp = (struct lwp_info *) entry;
2766 struct thread_info *thread = get_lwp_thread (lwp);
2767
2768 gdb_assert (lwp->suspended == 0);
2769 gdb_assert (lwp->stopped);
2770
2771 /* Allow debugging the jump pad, gdb_collect, etc.. */
2772 return (supports_fast_tracepoints ()
2773 && in_process_agent_loaded ()
2774 && (gdb_breakpoint_here (lwp->stop_pc)
2775 || lwp->stopped_by_watchpoint
2776 || thread->last_resume_kind == resume_step)
2777 && linux_fast_tracepoint_collecting (lwp, NULL));
2778 }
2779
2780 static void
2781 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2782 {
2783 struct lwp_info *lwp = (struct lwp_info *) entry;
2784 struct thread_info *thread = get_lwp_thread (lwp);
2785 int *wstat;
2786
2787 gdb_assert (lwp->suspended == 0);
2788 gdb_assert (lwp->stopped);
2789
2790 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2791
2792 /* Allow debugging the jump pad, gdb_collect, etc. */
2793 if (!gdb_breakpoint_here (lwp->stop_pc)
2794 && !lwp->stopped_by_watchpoint
2795 && thread->last_resume_kind != resume_step
2796 && maybe_move_out_of_jump_pad (lwp, wstat))
2797 {
2798 if (debug_threads)
2799 fprintf (stderr,
2800 "LWP %ld needs stabilizing (in jump pad)\n",
2801 lwpid_of (lwp));
2802
2803 if (wstat)
2804 {
2805 lwp->status_pending_p = 0;
2806 enqueue_one_deferred_signal (lwp, wstat);
2807
2808 if (debug_threads)
2809 fprintf (stderr,
2810 "Signal %d for LWP %ld deferred "
2811 "(in jump pad)\n",
2812 WSTOPSIG (*wstat), lwpid_of (lwp));
2813 }
2814
2815 linux_resume_one_lwp (lwp, 0, 0, NULL);
2816 }
2817 else
2818 lwp->suspended++;
2819 }
2820
2821 static int
2822 lwp_running (struct inferior_list_entry *entry, void *data)
2823 {
2824 struct lwp_info *lwp = (struct lwp_info *) entry;
2825
2826 if (lwp->dead)
2827 return 0;
2828 if (lwp->stopped)
2829 return 0;
2830 return 1;
2831 }
2832
2833 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2834 If SUSPEND, then also increase the suspend count of every LWP,
2835 except EXCEPT. */
2836
2837 static void
2838 stop_all_lwps (int suspend, struct lwp_info *except)
2839 {
2840 stopping_threads = 1;
2841
2842 if (suspend)
2843 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2844 else
2845 find_inferior (&all_lwps, send_sigstop_callback, except);
2846 for_each_inferior (&all_lwps, wait_for_sigstop);
2847 stopping_threads = 0;
2848 }
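
/* Editor's sketch of how callers pair the primitives above (compare
   the exit-jump-pad breakpoint handling in linux_wait_1):

     stop_all_lwps (1, lwp);     <- stop and suspend all but LWP
     ... insert or remove breakpoints ...
     unstop_all_lwps (1, lwp);   <- unsuspend and re-resume them

   The suspend counts let such pairs nest: a thread is only set
   running again once every matching unsuspend has been applied.  */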
2849
2850 /* Resume execution of the inferior process.
2851 If STEP is nonzero, single-step it.
2852 If SIGNAL is nonzero, give it that signal. */
2853
2854 static void
2855 linux_resume_one_lwp (struct lwp_info *lwp,
2856 int step, int signal, siginfo_t *info)
2857 {
2858 struct thread_info *saved_inferior;
2859 int fast_tp_collecting;
2860
2861 if (lwp->stopped == 0)
2862 return;
2863
2864 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2865
2866 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2867
2868 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2869 user used the "jump" command, or "set $pc = foo"). */
2870 if (lwp->stop_pc != get_pc (lwp))
2871 {
2872 /* Collecting 'while-stepping' actions doesn't make sense
2873 anymore. */
2874 release_while_stepping_state_list (get_lwp_thread (lwp));
2875 }
2876
2877 /* If we have pending signals or status, and a new signal, enqueue the
2878 signal. Also enqueue the signal if we are waiting to reinsert a
2879 breakpoint; it will be picked up again below. */
2880 if (signal != 0
2881 && (lwp->status_pending_p
2882 || lwp->pending_signals != NULL
2883 || lwp->bp_reinsert != 0
2884 || fast_tp_collecting))
2885 {
2886 struct pending_signals *p_sig;
2887 p_sig = xmalloc (sizeof (*p_sig));
2888 p_sig->prev = lwp->pending_signals;
2889 p_sig->signal = signal;
2890 if (info == NULL)
2891 memset (&p_sig->info, 0, sizeof (siginfo_t));
2892 else
2893 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2894 lwp->pending_signals = p_sig;
2895 }
2896
2897 if (lwp->status_pending_p)
2898 {
2899 if (debug_threads)
2900 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2901 " has pending status\n",
2902 lwpid_of (lwp), step ? "step" : "continue", signal,
2903 lwp->stop_expected ? "expected" : "not expected");
2904 return;
2905 }
2906
2907 saved_inferior = current_inferior;
2908 current_inferior = get_lwp_thread (lwp);
2909
2910 if (debug_threads)
2911 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2912 lwpid_of (lwp), step ? "step" : "continue", signal,
2913 lwp->stop_expected ? "expected" : "not expected");
2914
2915 /* This bit needs some thinking about. If we get a signal that
2916 we must report while a single-step reinsert is still pending,
2917 we often end up resuming the thread. It might be better to
2918 (ew) allow a stack of pending events; then we could be sure that
2919 the reinsert happened right away and not lose any signals.
2920
2921 Making this stack would also shrink the window in which breakpoints are
2922 uninserted (see comment in linux_wait_for_lwp) but not enough for
2923 complete correctness, so it won't solve that problem. It may be
2924 worthwhile just to solve this one, however. */
2925 if (lwp->bp_reinsert != 0)
2926 {
2927 if (debug_threads)
2928 fprintf (stderr, " pending reinsert at 0x%s\n",
2929 paddress (lwp->bp_reinsert));
2930
2931 if (can_hardware_single_step ())
2932 {
2933 if (fast_tp_collecting == 0)
2934 {
2935 if (step == 0)
2936 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2937 if (lwp->suspended)
2938 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2939 lwp->suspended);
2940 }
2941
2942 step = 1;
2943 }
2944
2945 /* Postpone any pending signal. It was enqueued above. */
2946 signal = 0;
2947 }
2948
2949 if (fast_tp_collecting == 1)
2950 {
2951 if (debug_threads)
2952 fprintf (stderr, "\
2953 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2954 lwpid_of (lwp));
2955
2956 /* Postpone any pending signal. It was enqueued above. */
2957 signal = 0;
2958 }
2959 else if (fast_tp_collecting == 2)
2960 {
2961 if (debug_threads)
2962 fprintf (stderr, "\
2963 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2964 lwpid_of (lwp));
2965
2966 if (can_hardware_single_step ())
2967 step = 1;
2968 else
2969 fatal ("moving out of jump pad single-stepping"
2970 " not implemented on this target");
2971
2972 /* Postpone any pending signal. It was enqueued above. */
2973 signal = 0;
2974 }
2975
2976 /* If we have while-stepping actions in this thread, set it stepping.
2977 If we have a signal to deliver, it may or may not be set to
2978 SIG_IGN, we don't know. Assume so, and allow collecting
2979 while-stepping into a signal handler. A possible smart thing to
2980 do would be to set an internal breakpoint at the signal return
2981 address, continue, and carry on catching this while-stepping
2982 action only when that breakpoint is hit. A future
2983 enhancement. */
2984 if (get_lwp_thread (lwp)->while_stepping != NULL
2985 && can_hardware_single_step ())
2986 {
2987 if (debug_threads)
2988 fprintf (stderr,
2989 "lwp %ld has a while-stepping action -> forcing step.\n",
2990 lwpid_of (lwp));
2991 step = 1;
2992 }
2993
2994 if (debug_threads && the_low_target.get_pc != NULL)
2995 {
2996 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2997 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2998 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2999 }
3000
3001 /* If we have pending signals, consume one unless we are trying to
3002 reinsert a breakpoint or we're trying to finish a fast tracepoint
3003 collect. */
3004 if (lwp->pending_signals != NULL
3005 && lwp->bp_reinsert == 0
3006 && fast_tp_collecting == 0)
3007 {
3008 struct pending_signals **p_sig;
3009
3010 p_sig = &lwp->pending_signals;
3011 while ((*p_sig)->prev != NULL)
3012 p_sig = &(*p_sig)->prev;
3013
3014 signal = (*p_sig)->signal;
3015 if ((*p_sig)->info.si_signo != 0)
3016 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3017
3018 free (*p_sig);
3019 *p_sig = NULL;
3020 }
3021
3022 if (the_low_target.prepare_to_resume != NULL)
3023 the_low_target.prepare_to_resume (lwp);
3024
3025 regcache_invalidate_one ((struct inferior_list_entry *)
3026 get_lwp_thread (lwp));
3027 errno = 0;
3028 lwp->stopped = 0;
3029 lwp->stopped_by_watchpoint = 0;
3030 lwp->stepping = step;
3031 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3032 /* Coerce to a uintptr_t first to avoid potential gcc warning
3033 of coercing an 8 byte integer to a 4 byte pointer. */
3034 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3035
3036 current_inferior = saved_inferior;
3037 if (errno)
3038 {
3039 /* ESRCH from ptrace either means that the thread was already
3040 running (an error) or that it is gone (a race condition). If
3041 it's gone, we will get a notification the next time we wait,
3042 so we can ignore the error. We could differentiate these
3043 two, but it's tricky without waiting; the thread still exists
3044 as a zombie, so sending it signal 0 would succeed. So just
3045 ignore ESRCH. */
3046 if (errno == ESRCH)
3047 return;
3048
3049 perror_with_name ("ptrace");
3050 }
3051 }
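
/* Editor's illustrative sketch, not part of gdbserver: pending
   signals above are pushed at the head of a prev-linked list but
   consumed from the tail, so delivery is first-in first-out.  A
   hypothetical standalone model of the consumption step (HEAD is
   assumed non-NULL):  */

struct example_sig_node
{
  struct example_sig_node *prev;
  int signal;
};

static int
example_pop_oldest_signal (struct example_sig_node **head)
{
  struct example_sig_node **p = head;
  int sig;

  /* Walk to the tail; that node holds the oldest enqueued signal,
     exactly as in the consumption loop above.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}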
3052
3053 struct thread_resume_array
3054 {
3055 struct thread_resume *resume;
3056 size_t n;
3057 };
3058
3059 /* This function is called once per thread. We look up the thread
3060 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3061 resume request.
3062
3063 This algorithm is O(threads * resume elements), but resume elements
3064 is small (and will remain small at least until GDB supports thread
3065 suspension). */
3066 static int
3067 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3068 {
3069 struct lwp_info *lwp;
3070 struct thread_info *thread;
3071 int ndx;
3072 struct thread_resume_array *r;
3073
3074 thread = (struct thread_info *) entry;
3075 lwp = get_thread_lwp (thread);
3076 r = arg;
3077
3078 for (ndx = 0; ndx < r->n; ndx++)
3079 {
3080 ptid_t ptid = r->resume[ndx].thread;
3081 if (ptid_equal (ptid, minus_one_ptid)
3082 || ptid_equal (ptid, entry->id)
3083 || (ptid_is_pid (ptid)
3084 && (ptid_get_pid (ptid) == pid_of (lwp)))
3085 || (ptid_get_lwp (ptid) == -1
3086 && (ptid_get_pid (ptid) == pid_of (lwp))))
3087 {
3088 if (r->resume[ndx].kind == resume_stop
3089 && thread->last_resume_kind == resume_stop)
3090 {
3091 if (debug_threads)
3092 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3093 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3094 ? "stopped"
3095 : "stopping",
3096 lwpid_of (lwp));
3097
3098 continue;
3099 }
3100
3101 lwp->resume = &r->resume[ndx];
3102 thread->last_resume_kind = lwp->resume->kind;
3103
3104 /* If we had a deferred signal to report, dequeue one now.
3105 This can happen if LWP gets more than one signal while
3106 trying to get out of a jump pad. */
3107 if (lwp->stopped
3108 && !lwp->status_pending_p
3109 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3110 {
3111 lwp->status_pending_p = 1;
3112
3113 if (debug_threads)
3114 fprintf (stderr,
3115 "Dequeueing deferred signal %d for LWP %ld, "
3116 "leaving status pending.\n",
3117 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3118 }
3119
3120 return 0;
3121 }
3122 }
3123
3124 /* No resume action for this thread. */
3125 lwp->resume = NULL;
3126
3127 return 0;
3128 }
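
/* Editor's note: the matching above implements the vCont thread-id
   wildcards.  minus_one_ptid matches every thread; a pid-only ptid,
   or one whose LWP field is -1, matches every thread of that process;
   anything else must match exactly.  The first resume entry that
   matches a given thread wins, which is why more specific vCont
   actions are listed before generic ones.  */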
3129
3130
3131 /* Set *FLAG_P if this lwp has an interesting status pending. */
3132 static int
3133 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3134 {
3135 struct lwp_info *lwp = (struct lwp_info *) entry;
3136
3137 /* LWPs which will not be resumed are not interesting, because
3138 we might not wait for them next time through linux_wait. */
3139 if (lwp->resume == NULL)
3140 return 0;
3141
3142 if (lwp->status_pending_p)
3143 * (int *) flag_p = 1;
3144
3145 return 0;
3146 }
3147
3148 /* Return 1 if this lwp that GDB wants running is stopped at an
3149 internal breakpoint that we need to step over. It assumes that any
3150 required STOP_PC adjustment has already been propagated to the
3151 inferior's regcache. */
3152
3153 static int
3154 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3155 {
3156 struct lwp_info *lwp = (struct lwp_info *) entry;
3157 struct thread_info *thread;
3158 struct thread_info *saved_inferior;
3159 CORE_ADDR pc;
3160
3161 /* LWPs which will not be resumed are not interesting, because we
3162 might not wait for them next time through linux_wait. */
3163
3164 if (!lwp->stopped)
3165 {
3166 if (debug_threads)
3167 fprintf (stderr,
3168 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3169 lwpid_of (lwp));
3170 return 0;
3171 }
3172
3173 thread = get_lwp_thread (lwp);
3174
3175 if (thread->last_resume_kind == resume_stop)
3176 {
3177 if (debug_threads)
3178 fprintf (stderr,
3179 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3180 lwpid_of (lwp));
3181 return 0;
3182 }
3183
3184 gdb_assert (lwp->suspended >= 0);
3185
3186 if (lwp->suspended)
3187 {
3188 if (debug_threads)
3189 fprintf (stderr,
3190 "Need step over [LWP %ld]? Ignoring, suspended\n",
3191 lwpid_of (lwp));
3192 return 0;
3193 }
3194
3195 if (!lwp->need_step_over)
3196 {
3197 if (debug_threads)
3198 fprintf (stderr,
3199 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3200 }
3201
3202 if (lwp->status_pending_p)
3203 {
3204 if (debug_threads)
3205 fprintf (stderr,
3206 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3207 lwpid_of (lwp));
3208 return 0;
3209 }
3210
3211 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3212 or we have. */
3213 pc = get_pc (lwp);
3214
3215 /* If the PC has changed since we stopped, then don't do anything,
3216 and let the breakpoint/tracepoint be hit. This happens if, for
3217 instance, GDB handled the decr_pc_after_break subtraction itself,
3218 GDB is OOL stepping this thread, or the user has issued a "jump"
3219 command, or poked thread's registers herself. */
3220 if (pc != lwp->stop_pc)
3221 {
3222 if (debug_threads)
3223 fprintf (stderr,
3224 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3225 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3226 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3227
3228 lwp->need_step_over = 0;
3229 return 0;
3230 }
3231
3232 saved_inferior = current_inferior;
3233 current_inferior = thread;
3234
3235 /* We can only step over breakpoints we know about. */
3236 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3237 {
3238 /* Don't step over a breakpoint that GDB expects to hit
3239 though. */
3240 if (gdb_breakpoint_here (pc))
3241 {
3242 if (debug_threads)
3243 fprintf (stderr,
3244 "Need step over [LWP %ld]? yes, but found"
3245 " GDB breakpoint at 0x%s; skipping step over\n",
3246 lwpid_of (lwp), paddress (pc));
3247
3248 current_inferior = saved_inferior;
3249 return 0;
3250 }
3251 else
3252 {
3253 if (debug_threads)
3254 fprintf (stderr,
3255 "Need step over [LWP %ld]? yes, "
3256 "found breakpoint at 0x%s\n",
3257 lwpid_of (lwp), paddress (pc));
3258
3259 /* We've found an lwp that needs stepping over --- return 1 so
3260 that find_inferior stops looking. */
3261 current_inferior = saved_inferior;
3262
3263 /* If the step over is cancelled, this is set again. */
3264 lwp->need_step_over = 0;
3265 return 1;
3266 }
3267 }
3268
3269 current_inferior = saved_inferior;
3270
3271 if (debug_threads)
3272 fprintf (stderr,
3273 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3274 lwpid_of (lwp), paddress (pc));
3275
3276 return 0;
3277 }
3278
3279 /* Start a step-over operation on LWP.  When LWP stops at a
3280 breakpoint, to make progress we need to get the breakpoint out
3281 of the way.  If we let other threads run while we do that, they may
3282 pass by the breakpoint location and miss hitting it.  To avoid
3283 that, a step-over momentarily stops all threads while LWP is
3284 single-stepped with the breakpoint temporarily uninserted from
3285 the inferior.  When the single-step finishes, we reinsert the
3286 breakpoint, and let all threads that are supposed to be running
3287 run again.
3288
3289 On targets that don't support hardware single-step, we don't
3290 currently support full software single-stepping. Instead, we only
3291 support stepping over the thread event breakpoint, by asking the
3292 low target where to place a reinsert breakpoint. Since this
3293 routine assumes the breakpoint being stepped over is a thread event
3294 breakpoint, it usually assumes the return address of the current
3295 function is a good enough place to set the reinsert breakpoint. */
3296
3297 static int
3298 start_step_over (struct lwp_info *lwp)
3299 {
3300 struct thread_info *saved_inferior;
3301 CORE_ADDR pc;
3302 int step;
3303
3304 if (debug_threads)
3305 fprintf (stderr,
3306 "Starting step-over on LWP %ld. Stopping all threads\n",
3307 lwpid_of (lwp));
3308
3309 stop_all_lwps (1, lwp);
3310 gdb_assert (lwp->suspended == 0);
3311
3312 if (debug_threads)
3313 fprintf (stderr, "Done stopping all threads for step-over.\n");
3314
3315 /* Note, we should always reach here with an already adjusted PC,
3316 either by GDB (if we're resuming due to GDB's request), or by our
3317 caller, if we just finished handling an internal breakpoint GDB
3318 shouldn't care about. */
3319 pc = get_pc (lwp);
3320
3321 saved_inferior = current_inferior;
3322 current_inferior = get_lwp_thread (lwp);
3323
3324 lwp->bp_reinsert = pc;
3325 uninsert_breakpoints_at (pc);
3326 uninsert_fast_tracepoint_jumps_at (pc);
3327
3328 if (can_hardware_single_step ())
3329 {
3330 step = 1;
3331 }
3332 else
3333 {
3334 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3335 set_reinsert_breakpoint (raddr);
3336 step = 0;
3337 }
3338
3339 current_inferior = saved_inferior;
3340
3341 linux_resume_one_lwp (lwp, step, 0, NULL);
3342
3343 /* Require next event from this LWP. */
3344 step_over_bkpt = lwp->head.id;
3345 return 1;
3346 }
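
/* Editor's sketch of the complete step-over protocol, split between
   start_step_over above and finish_step_over below:

     stop_all_lwps (1, lwp);         <- park every other thread
     uninsert breakpoints at PC      <- lift the trap
     single-step LWP (or run it to a reinsert breakpoint)
     ... next event must come from LWP (step_over_bkpt) ...
     reinsert breakpoints at PC      <- restore the trap
     unsuspend / proceed the others

   Keeping the other threads stopped is what guarantees none of them
   can run past the lifted breakpoint while it is out.  */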
3347
3348 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3349 start_step_over, if still there, and delete any reinsert
3350 breakpoints we've set, on non hardware single-step targets. */
3351
3352 static int
3353 finish_step_over (struct lwp_info *lwp)
3354 {
3355 if (lwp->bp_reinsert != 0)
3356 {
3357 if (debug_threads)
3358 fprintf (stderr, "Finished step over.\n");
3359
3360 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3361 may be no breakpoint to reinsert there by now. */
3362 reinsert_breakpoints_at (lwp->bp_reinsert);
3363 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3364
3365 lwp->bp_reinsert = 0;
3366
3367 /* Delete any software-single-step reinsert breakpoints. No
3368 longer needed. We don't have to worry about other threads
3369 hitting this trap, and later not being able to explain it,
3370 because we were stepping over a breakpoint, and we kept all
3371 threads but LWP stopped while doing that.  */
3372 if (!can_hardware_single_step ())
3373 delete_reinsert_breakpoints ();
3374
3375 step_over_bkpt = null_ptid;
3376 return 1;
3377 }
3378 else
3379 return 0;
3380 }
3381
3382 /* This function is called once per thread. We check the thread's resume
3383 request, which will tell us whether to resume, step, or leave the thread
3384 stopped; and what signal, if any, it should be sent.
3385
3386 For threads for which we aren't explicitly told otherwise, we preserve
3387 the stepping flag; this is used for stepping over gdbserver-placed
3388 breakpoints.
3389
3390 If pending_flags was set in any thread, we queue any needed
3391 signals, since we won't actually resume. We already have a pending
3392 event to report, so we don't need to preserve any step requests;
3393 they should be re-issued if necessary. */
3394
3395 static int
3396 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3397 {
3398 struct lwp_info *lwp;
3399 struct thread_info *thread;
3400 int step;
3401 int leave_all_stopped = * (int *) arg;
3402 int leave_pending;
3403
3404 thread = (struct thread_info *) entry;
3405 lwp = get_thread_lwp (thread);
3406
3407 if (lwp->resume == NULL)
3408 return 0;
3409
3410 if (lwp->resume->kind == resume_stop)
3411 {
3412 if (debug_threads)
3413 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3414
3415 if (!lwp->stopped)
3416 {
3417 if (debug_threads)
3418 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3419
3420 /* Stop the thread, and wait for the event asynchronously,
3421 through the event loop. */
3422 send_sigstop (lwp);
3423 }
3424 else
3425 {
3426 if (debug_threads)
3427 fprintf (stderr, "already stopped LWP %ld\n",
3428 lwpid_of (lwp));
3429
3430 /* The LWP may have been stopped in an internal event that
3431 was not meant to be notified back to GDB (e.g., gdbserver
3432 breakpoint), so we should be reporting a stop event in
3433 this case too. */
3434
3435 /* If the thread already has a pending SIGSTOP, this is a
3436 no-op. Otherwise, something later will presumably resume
3437 the thread and this will cause it to cancel any pending
3438 operation, due to last_resume_kind == resume_stop. If
3439 the thread already has a pending status to report, we
3440 will still report it the next time we wait - see
3441 status_pending_p_callback. */
3442
3443 /* If we already have a pending signal to report, then
3444 there's no need to queue a SIGSTOP, as this means we're
3445 midway through moving the LWP out of the jump pad, and we
3446 will report the pending signal as soon as that is
3447 finished. */
3448 if (lwp->pending_signals_to_report == NULL)
3449 send_sigstop (lwp);
3450 }
3451
3452 /* For stop requests, we're done. */
3453 lwp->resume = NULL;
3454 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3455 return 0;
3456 }
3457
3458 /* If this thread which is about to be resumed has a pending status,
3459 then don't resume any threads - we can just report the pending
3460 status. Make sure to queue any signals that would otherwise be
3461 sent.  In all-stop mode, we base this decision on whether *any*
3462 thread has a pending status. If there's a thread that needs the
3463 step-over-breakpoint dance, then don't resume any other thread
3464 but that particular one. */
3465 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3466
3467 if (!leave_pending)
3468 {
3469 if (debug_threads)
3470 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3471
3472 step = (lwp->resume->kind == resume_step);
3473 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3474 }
3475 else
3476 {
3477 if (debug_threads)
3478 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3479
3480 /* If we have a new signal, enqueue the signal. */
3481 if (lwp->resume->sig != 0)
3482 {
3483 struct pending_signals *p_sig;
3484 p_sig = xmalloc (sizeof (*p_sig));
3485 p_sig->prev = lwp->pending_signals;
3486 p_sig->signal = lwp->resume->sig;
3487 memset (&p_sig->info, 0, sizeof (siginfo_t));
3488
3489 /* If this is the same signal we were previously stopped by,
3490 make sure to queue its siginfo. We can ignore the return
3491 value of ptrace; if it fails, we'll skip
3492 PTRACE_SETSIGINFO. */
3493 if (WIFSTOPPED (lwp->last_status)
3494 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3495 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3496
3497 lwp->pending_signals = p_sig;
3498 }
3499 }
3500
3501 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3502 lwp->resume = NULL;
3503 return 0;
3504 }
3505
3506 static void
3507 linux_resume (struct thread_resume *resume_info, size_t n)
3508 {
3509 struct thread_resume_array array = { resume_info, n };
3510 struct lwp_info *need_step_over = NULL;
3511 int any_pending;
3512 int leave_all_stopped;
3513
3514 find_inferior (&all_threads, linux_set_resume_request, &array);
3515
3516 /* If there is a thread which would otherwise be resumed, which has
3517 a pending status, then don't resume any threads - we can just
3518 report the pending status. Make sure to queue any signals that
3519 would otherwise be sent. In non-stop mode, we'll apply this
3520 logic to each thread individually. We consume all pending events
3521 before considering starting a step-over (in all-stop).  */
3522 any_pending = 0;
3523 if (!non_stop)
3524 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3525
3526 /* If there is a thread which would otherwise be resumed, which is
3527 stopped at a breakpoint that needs stepping over, then don't
3528 resume any threads - have it step over the breakpoint with all
3529 other threads stopped, then resume all threads again. Make sure
3530 to queue any signals that would otherwise be delivered or
3531 queued. */
3532 if (!any_pending && supports_breakpoints ())
3533 need_step_over
3534 = (struct lwp_info *) find_inferior (&all_lwps,
3535 need_step_over_p, NULL);
3536
3537 leave_all_stopped = (need_step_over != NULL || any_pending);
3538
3539 if (debug_threads)
3540 {
3541 if (need_step_over != NULL)
3542 fprintf (stderr, "Not resuming all, need step over\n");
3543 else if (any_pending)
3544 fprintf (stderr,
3545 "Not resuming, all-stop and found "
3546 "an LWP with pending status\n");
3547 else
3548 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3549 }
3550
3551 /* Even if we're leaving threads stopped, queue all signals we'd
3552 otherwise deliver. */
3553 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3554
3555 if (need_step_over)
3556 start_step_over (need_step_over);
3557 }
3558
3559 /* This function is called once per thread. We check the thread's
3560 last resume request, which will tell us whether to resume, step, or
3561 leave the thread stopped. Any signal the client requested to be
3562 delivered has already been enqueued at this point.
3563
3564 If any thread that GDB wants running is stopped at an internal
3565 breakpoint that needs stepping over, we start a step-over operation
3566 on that particular thread, and leave all others stopped. */
3567
3568 static int
3569 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3570 {
3571 struct lwp_info *lwp = (struct lwp_info *) entry;
3572 struct thread_info *thread;
3573 int step;
3574
3575 if (lwp == except)
3576 return 0;
3577
3578 if (debug_threads)
3579 fprintf (stderr,
3580 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3581
3582 if (!lwp->stopped)
3583 {
3584 if (debug_threads)
3585 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3586 return 0;
3587 }
3588
3589 thread = get_lwp_thread (lwp);
3590
3591 if (thread->last_resume_kind == resume_stop
3592 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3593 {
3594 if (debug_threads)
3595 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3596 lwpid_of (lwp));
3597 return 0;
3598 }
3599
3600 if (lwp->status_pending_p)
3601 {
3602 if (debug_threads)
3603 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3604 lwpid_of (lwp));
3605 return 0;
3606 }
3607
3608 gdb_assert (lwp->suspended >= 0);
3609
3610 if (lwp->suspended)
3611 {
3612 if (debug_threads)
3613 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3614 return 0;
3615 }
3616
3617 if (thread->last_resume_kind == resume_stop
3618 && lwp->pending_signals_to_report == NULL
3619 && lwp->collecting_fast_tracepoint == 0)
3620 {
3621 /* We haven't reported this LWP as stopped yet (otherwise, the
3622 last_status.kind check above would catch it, and we wouldn't
3623 reach here).  This LWP may have been momentarily paused by a
3624 stop_all_lwps call while handling, for example, another LWP's
3625 step-over. In that case, the pending expected SIGSTOP signal
3626 that was queued at vCont;t handling time will have already
3627 been consumed by wait_for_sigstop, and so we need to requeue
3628 another one here. Note that if the LWP already has a SIGSTOP
3629 pending, this is a no-op. */
3630
3631 if (debug_threads)
3632 fprintf (stderr,
3633 "Client wants LWP %ld to stop. "
3634 "Making sure it has a SIGSTOP pending\n",
3635 lwpid_of (lwp));
3636
3637 send_sigstop (lwp);
3638 }
3639
3640 step = thread->last_resume_kind == resume_step;
3641 linux_resume_one_lwp (lwp, step, 0, NULL);
3642 return 0;
3643 }
3644
3645 static int
3646 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3647 {
3648 struct lwp_info *lwp = (struct lwp_info *) entry;
3649
3650 if (lwp == except)
3651 return 0;
3652
3653 lwp->suspended--;
3654 gdb_assert (lwp->suspended >= 0);
3655
3656 return proceed_one_lwp (entry, except);
3657 }
3658
3659 /* When we finish a step-over, set threads running again. If there's
3660 another thread that may need a step-over, now's the time to start
3661 it. Eventually, we'll move all threads past their breakpoints. */
3662
3663 static void
3664 proceed_all_lwps (void)
3665 {
3666 struct lwp_info *need_step_over;
3667
3668 /* If there is a thread which would otherwise be resumed, which is
3669 stopped at a breakpoint that needs stepping over, then don't
3670 resume any threads - have it step over the breakpoint with all
3671 other threads stopped, then resume all threads again. */
3672
3673 if (supports_breakpoints ())
3674 {
3675 need_step_over
3676 = (struct lwp_info *) find_inferior (&all_lwps,
3677 need_step_over_p, NULL);
3678
3679 if (need_step_over != NULL)
3680 {
3681 if (debug_threads)
3682 fprintf (stderr, "proceed_all_lwps: found "
3683 "thread %ld needing a step-over\n",
3684 lwpid_of (need_step_over));
3685
3686 start_step_over (need_step_over);
3687 return;
3688 }
3689 }
3690
3691 if (debug_threads)
3692 fprintf (stderr, "Proceeding, no step-over needed\n");
3693
3694 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3695 }
3696
3697 /* Stopped LWPs that the client wanted to be running and that don't
3698 have pending statuses are set to run again, except for EXCEPT, if
3699 not NULL.  This undoes a stop_all_lwps call.  */
3700
3701 static void
3702 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3703 {
3704 if (debug_threads)
3705 {
3706 if (except)
3707 fprintf (stderr,
3708 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3709 else
3710 fprintf (stderr,
3711 "unstopping all lwps\n");
3712 }
3713
3714 if (unsuspend)
3715 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3716 else
3717 find_inferior (&all_lwps, proceed_one_lwp, except);
3718 }
3719
3720 #ifdef HAVE_LINUX_USRREGS
3721
3722 int
3723 register_addr (int regnum)
3724 {
3725 int addr;
3726
3727 if (regnum < 0 || regnum >= the_low_target.num_regs)
3728 error ("Invalid register number %d.", regnum);
3729
3730 addr = the_low_target.regmap[regnum];
3731
3732 return addr;
3733 }
3734
3735 /* Fetch one register. */
3736 static void
3737 fetch_register (struct regcache *regcache, int regno)
3738 {
3739 CORE_ADDR regaddr;
3740 int i, size;
3741 char *buf;
3742 int pid;
3743
3744 if (regno >= the_low_target.num_regs)
3745 return;
3746 if ((*the_low_target.cannot_fetch_register) (regno))
3747 return;
3748
3749 regaddr = register_addr (regno);
3750 if (regaddr == -1)
3751 return;
3752
3753 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3754 & -sizeof (PTRACE_XFER_TYPE));
3755 buf = alloca (size);
3756
3757 pid = lwpid_of (get_thread_lwp (current_inferior));
3758 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3759 {
3760 errno = 0;
3761 *(PTRACE_XFER_TYPE *) (buf + i) =
3762 ptrace (PTRACE_PEEKUSER, pid,
3763 /* Coerce to a uintptr_t first to avoid potential gcc warning
3764 of coercing an 8 byte integer to a 4 byte pointer. */
3765 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
3766 regaddr += sizeof (PTRACE_XFER_TYPE);
3767 if (errno != 0)
3768 error ("reading register %d: %s", regno, strerror (errno));
3769 }
3770
3771 if (the_low_target.supply_ptrace_register)
3772 the_low_target.supply_ptrace_register (regcache, regno, buf);
3773 else
3774 supply_register (regcache, regno, buf);
3775 }
3776
3777 /* Store one register. */
3778 static void
3779 store_register (struct regcache *regcache, int regno)
3780 {
3781 CORE_ADDR regaddr;
3782 int i, size;
3783 char *buf;
3784 int pid;
3785
3786 if (regno >= the_low_target.num_regs)
3787 return;
3788 if ((*the_low_target.cannot_store_register) (regno))
3789 return;
3790
3791 regaddr = register_addr (regno);
3792 if (regaddr == -1)
3793 return;
3794
3795 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3796 & -sizeof (PTRACE_XFER_TYPE));
3797 buf = alloca (size);
3798 memset (buf, 0, size);
3799
3800 if (the_low_target.collect_ptrace_register)
3801 the_low_target.collect_ptrace_register (regcache, regno, buf);
3802 else
3803 collect_register (regcache, regno, buf);
3804
3805 pid = lwpid_of (get_thread_lwp (current_inferior));
3806 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3807 {
3808 errno = 0;
3809 ptrace (PTRACE_POKEUSER, pid,
3810 /* Coerce to a uintptr_t first to avoid potential gcc warning
3811 about coercing an 8 byte integer to a 4 byte pointer. */
3812 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3813 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3814 if (errno != 0)
3815 {
3816 /* At this point, ESRCH should mean the process is
3817 already gone, in which case we simply ignore attempts
3818 to change its registers. See also the related
3819 comment in linux_resume_one_lwp. */
3820 if (errno == ESRCH)
3821 return;
3822
3823 if ((*the_low_target.cannot_store_register) (regno) == 0)
3824 error ("writing register %d: %s", regno, strerror (errno));
3825 }
3826 regaddr += sizeof (PTRACE_XFER_TYPE);
3827 }
3828 }
3829
3830 /* Fetch all registers, or just one, from the child process. */
3831 static void
3832 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3833 {
3834 if (regno == -1)
3835 for (regno = 0; regno < the_low_target.num_regs; regno++)
3836 fetch_register (regcache, regno);
3837 else
3838 fetch_register (regcache, regno);
3839 }
3840
3841 /* Store our register values back into the inferior.
3842 If REGNO is -1, do this for all registers.
3843 Otherwise, REGNO specifies which register (so we can save time). */
3844 static void
3845 usr_store_inferior_registers (struct regcache *regcache, int regno)
3846 {
3847 if (regno == -1)
3848 for (regno = 0; regno < the_low_target.num_regs; regno++)
3849 store_register (regcache, regno);
3850 else
3851 store_register (regcache, regno);
3852 }
3853 #endif /* HAVE_LINUX_USRREGS */
3854
3855
3856
3857 #ifdef HAVE_LINUX_REGSETS
3858
3859 static int
3860 regsets_fetch_inferior_registers (struct regcache *regcache)
3861 {
3862 struct regset_info *regset;
3863 int saw_general_regs = 0;
3864 int pid;
3865 struct iovec iov;
3866
3867 regset = target_regsets;
3868
3869 pid = lwpid_of (get_thread_lwp (current_inferior));
3870 while (regset->size >= 0)
3871 {
3872 void *buf, *data;
3873 int nt_type, res;
3874
3875 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3876 {
3877 regset ++;
3878 continue;
3879 }
3880
3881 buf = xmalloc (regset->size);
3882
3883 nt_type = regset->nt_type;
3884 if (nt_type)
3885 {
3886 iov.iov_base = buf;
3887 iov.iov_len = regset->size;
3888 data = (void *) &iov;
3889 }
3890 else
3891 data = buf;
3892
3893 #ifndef __sparc__
3894 res = ptrace (regset->get_request, pid, nt_type, data);
3895 #else
3896 res = ptrace (regset->get_request, pid, data, nt_type);
3897 #endif
3898 if (res < 0)
3899 {
3900 if (errno == EIO)
3901 {
3902 /* If we get EIO on a regset, do not try it again for
3903 this process. */
3904 disabled_regsets[regset - target_regsets] = 1;
3905 free (buf);
3906 continue;
3907 }
3908 else
3909 {
3910 char s[256];
3911 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3912 pid);
3913 perror (s);
3914 }
3915 }
3916 else if (regset->type == GENERAL_REGS)
3917 saw_general_regs = 1;
3918 regset->store_function (regcache, buf);
3919 regset ++;
3920 free (buf);
3921 }
3922 if (saw_general_regs)
3923 return 0;
3924 else
3925 return 1;
3926 }
3927
3928 static int
3929 regsets_store_inferior_registers (struct regcache *regcache)
3930 {
3931 struct regset_info *regset;
3932 int saw_general_regs = 0;
3933 int pid;
3934 struct iovec iov;
3935
3936 regset = target_regsets;
3937
3938 pid = lwpid_of (get_thread_lwp (current_inferior));
3939 while (regset->size >= 0)
3940 {
3941 void *buf, *data;
3942 int nt_type, res;
3943
3944 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3945 {
3946 regset ++;
3947 continue;
3948 }
3949
3950 buf = xmalloc (regset->size);
3951
3952 /* First fill the buffer with the current register set contents,
3953 in case there are any items in the kernel's regset that are
3954 not in gdbserver's regcache. */
3955
3956 nt_type = regset->nt_type;
3957 if (nt_type)
3958 {
3959 iov.iov_base = buf;
3960 iov.iov_len = regset->size;
3961 data = (void *) &iov;
3962 }
3963 else
3964 data = buf;
3965
3966 #ifndef __sparc__
3967 res = ptrace (regset->get_request, pid, nt_type, data);
3968 #else
3969 res = ptrace (regset->get_request, pid, data, nt_type);
3970 #endif
3971
3972 if (res == 0)
3973 {
3974 /* Then overlay our cached registers on that. */
3975 regset->fill_function (regcache, buf);
3976
3977 /* Only now do we write the register set. */
3978 #ifndef __sparc__
3979 res = ptrace (regset->set_request, pid, nt_type, data);
3980 #else
3981 res = ptrace (regset->set_request, pid, data, nt_type);
3982 #endif
3983 }
3984
3985 if (res < 0)
3986 {
3987 if (errno == EIO)
3988 {
3989 /* If we get EIO on a regset, do not try it again for
3990 this process. */
3991 disabled_regsets[regset - target_regsets] = 1;
3992 free (buf);
3993 continue;
3994 }
3995 else if (errno == ESRCH)
3996 {
3997 /* At this point, ESRCH should mean the process is
3998 already gone, in which case we simply ignore attempts
3999 to change its registers. See also the related
4000 comment in linux_resume_one_lwp. */
4001 free (buf);
4002 return 0;
4003 }
4004 else
4005 {
4006 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4007 }
4008 }
4009 else if (regset->type == GENERAL_REGS)
4010 saw_general_regs = 1;
4011 regset ++;
4012 free (buf);
4013 }
4014 if (saw_general_regs)
4015 return 0;
4016 else
4017 return 1;
4019 }
4020
4021 #endif /* HAVE_LINUX_REGSETS */
4022
4023
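/* Fetch registers from the inferior.  When regsets are available, try
   them first (one ptrace call per regset); if no general-purpose
   regset could be read that way, fall back to fetching registers one
   at a time with PTRACE_PEEKUSER.  */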
4024 void
4025 linux_fetch_registers (struct regcache *regcache, int regno)
4026 {
4027 #ifdef HAVE_LINUX_REGSETS
4028 if (regsets_fetch_inferior_registers (regcache) == 0)
4029 return;
4030 #endif
4031 #ifdef HAVE_LINUX_USRREGS
4032 usr_fetch_inferior_registers (regcache, regno);
4033 #endif
4034 }
4035
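/* Store registers to the inferior, using the same regsets-first,
   PTRACE_POKEUSER-fallback strategy as linux_fetch_registers above.  */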
4036 void
4037 linux_store_registers (struct regcache *regcache, int regno)
4038 {
4039 #ifdef HAVE_LINUX_REGSETS
4040 if (regsets_store_inferior_registers (regcache) == 0)
4041 return;
4042 #endif
4043 #ifdef HAVE_LINUX_USRREGS
4044 usr_store_inferior_registers (regcache, regno);
4045 #endif
4046 }
4047
4048
4049 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4050 to debugger memory starting at MYADDR. */
4051
4052 static int
4053 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4054 {
4055 register int i;
4056 /* Round starting address down to longword boundary. */
4057 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4058 /* Round ending address up; get number of longwords that makes. */
4059 register int count
4060 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4061 / sizeof (PTRACE_XFER_TYPE);
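/* Editor's note: e.g. with 8-byte transfer words, a read of len == 4
   bytes at memaddr == 0x1006 gives addr == 0x1000 and count == 2,
   since the request straddles two aligned words.  */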
4062 /* Allocate buffer of that many longwords. */
4063 register PTRACE_XFER_TYPE *buffer
4064 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4065 int fd;
4066 char filename[64];
4067 int pid = lwpid_of (get_thread_lwp (current_inferior));
4068
4069 /* Try using /proc. Don't bother for one word. */
4070 if (len >= 3 * sizeof (long))
4071 {
4072 /* We could keep this file open and cache it - possibly one per
4073 thread. That requires some juggling, but is even faster. */
4074 sprintf (filename, "/proc/%d/mem", pid);
4075 fd = open (filename, O_RDONLY | O_LARGEFILE);
4076 if (fd == -1)
4077 goto no_proc;
4078
4079 /* If pread64 is available, use it. It's faster if the kernel
4080 supports it (only one syscall), and it's 64-bit safe even on
4081 32-bit platforms (for instance, SPARC debugging a SPARC64
4082 application). */
4083 #ifdef HAVE_PREAD64
4084 if (pread64 (fd, myaddr, len, memaddr) != len)
4085 #else
4086 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4087 #endif
4088 {
4089 close (fd);
4090 goto no_proc;
4091 }
4092
4093 close (fd);
4094 return 0;
4095 }
4096
4097 no_proc:
4098 /* Read all the longwords. */
4099 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4100 {
4101 errno = 0;
4102 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4103 about coercing an 8 byte integer to a 4 byte pointer. */
4104 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4105 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4106 if (errno)
4107 return errno;
4108 }
4109
4110 /* Copy appropriate bytes out of the buffer. */
4111 memcpy (myaddr,
4112 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4113 len);
4114
4115 return 0;
4116 }
4117
4118 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4119 memory at MEMADDR. On failure (cannot write to the inferior)
4120 returns the value of errno. */
4121
4122 static int
4123 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4124 {
4125 register int i;
4126 /* Round starting address down to longword boundary. */
4127 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4128 /* Round ending address up; get number of longwords that makes. */
4129 register int count
4130 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4131 / sizeof (PTRACE_XFER_TYPE);
4132
4133 /* Allocate buffer of that many longwords. */
4134 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4135 alloca (count * sizeof (PTRACE_XFER_TYPE));
4136
4137 int pid = lwpid_of (get_thread_lwp (current_inferior));
4138
4139 if (debug_threads)
4140 {
4141 /* Dump up to four bytes. */
4142 unsigned int val = * (unsigned int *) myaddr;
4143 if (len == 1)
4144 val = val & 0xff;
4145 else if (len == 2)
4146 val = val & 0xffff;
4147 else if (len == 3)
4148 val = val & 0xffffff;
4149 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4150 val, (long)memaddr);
4151 }
4152
4153 /* Fill start and end extra bytes of buffer with existing memory data. */
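/* Editor's illustration: only the first and last words can contain
   bytes outside [memaddr, memaddr + len), so those two words are
   pre-read below and the middle of the buffer is simply overwritten:

     |<--- word 0 --->|<--- word 1 --->|<--- word 2 --->|
      kept |<------------- new data ------------->| kept          */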
4154
4155 errno = 0;
4156 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4157 about coercing an 8 byte integer to a 4 byte pointer. */
4158 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4159 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4160 if (errno)
4161 return errno;
4162
4163 if (count > 1)
4164 {
4165 errno = 0;
4166 buffer[count - 1]
4167 = ptrace (PTRACE_PEEKTEXT, pid,
4168 /* Coerce to a uintptr_t first to avoid potential gcc warning
4169 about coercing an 8 byte integer to a 4 byte pointer. */
4170 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4171 * sizeof (PTRACE_XFER_TYPE)),
4172 0);
4173 if (errno)
4174 return errno;
4175 }
4176
4177 /* Copy data to be written over corresponding part of buffer. */
4178
4179 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4180 myaddr, len);
4181
4182 /* Write the entire buffer. */
4183
4184 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4185 {
4186 errno = 0;
4187 ptrace (PTRACE_POKETEXT, pid,
4188 /* Coerce to a uintptr_t first to avoid potential gcc warning
4189 about coercing an 8 byte integer to a 4 byte pointer. */
4190 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4191 (PTRACE_ARG4_TYPE) buffer[i]);
4192 if (errno)
4193 return errno;
4194 }
4195
4196 return 0;
4197 }
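
/* Editor's illustration (hypothetical helper, not part of gdbserver):
   how the two primitives above compose into a read-modify-write of a
   single byte, much as the breakpoint code does when planting a trap
   instruction.  Kept under "#if 0" so it is never compiled.  */
#if 0
static int
example_poke_byte (CORE_ADDR addr, unsigned char byte)
{
  unsigned char old;
  int err;

  /* Save the original byte; a real caller would keep it around so
     the modification could be undone later.  */
  err = linux_read_memory (addr, &old, 1);
  if (err != 0)
    return err;   /* errno value from the failed read.  */

  return linux_write_memory (addr, &byte, 1);
}
#endif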
4198
4199 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4200 static int linux_supports_tracefork_flag;
4201
4202 static void
4203 linux_enable_event_reporting (int pid)
4204 {
4205 if (!linux_supports_tracefork_flag)
4206 return;
4207
4208 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4209 }
4210
4211 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4212
4213 static int
4214 linux_tracefork_grandchild (void *arg)
4215 {
4216 _exit (0);
4217 }
4218
4219 #define STACK_SIZE 4096
4220
4221 static int
4222 linux_tracefork_child (void *arg)
4223 {
4224 ptrace (PTRACE_TRACEME, 0, 0, 0);
4225 kill (getpid (), SIGSTOP);
4226
4227 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4228
4229 if (fork () == 0)
4230 linux_tracefork_grandchild (NULL);
4231
4232 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4233
4234 #ifdef __ia64__
4235 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4236 CLONE_VM | SIGCHLD, NULL);
4237 #else
4238 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4239 CLONE_VM | SIGCHLD, NULL);
4240 #endif
4241
4242 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4243
4244 _exit (0);
4245 }
4246
4247 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4248 sure that we can enable the option, and that it had the desired
4249 effect. */
4250
4251 static void
4252 linux_test_for_tracefork (void)
4253 {
4254 int child_pid, ret, status;
4255 long second_pid;
4256 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4257 char *stack = xmalloc (STACK_SIZE * 4);
4258 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4259
4260 linux_supports_tracefork_flag = 0;
4261
4262 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4263
4264 child_pid = fork ();
4265 if (child_pid == 0)
4266 linux_tracefork_child (NULL);
4267
4268 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4269
4270 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4271 #ifdef __ia64__
4272 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4273 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4274 #else /* !__ia64__ */
4275 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4276 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4277 #endif /* !__ia64__ */
4278
4279 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4280
4281 if (child_pid == -1)
4282 perror_with_name ("clone");
4283
4284 ret = my_waitpid (child_pid, &status, 0);
4285 if (ret == -1)
4286 perror_with_name ("waitpid");
4287 else if (ret != child_pid)
4288 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4289 if (! WIFSTOPPED (status))
4290 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4291
4292 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4293 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4294 if (ret != 0)
4295 {
4296 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4297 if (ret != 0)
4298 {
4299 warning ("linux_test_for_tracefork: failed to kill child");
4300 return;
4301 }
4302
4303 ret = my_waitpid (child_pid, &status, 0);
4304 if (ret != child_pid)
4305 warning ("linux_test_for_tracefork: failed to wait for killed child");
4306 else if (!WIFSIGNALED (status))
4307 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4308 "killed child", status);
4309
4310 return;
4311 }
4312
4313 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4314 if (ret != 0)
4315 warning ("linux_test_for_tracefork: failed to resume child");
4316
4317 ret = my_waitpid (child_pid, &status, 0);
4318
4319 if (ret == child_pid && WIFSTOPPED (status)
4320 && status >> 16 == PTRACE_EVENT_FORK)
4321 {
4322 second_pid = 0;
4323 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4324 if (ret == 0 && second_pid != 0)
4325 {
4326 int second_status;
4327
4328 linux_supports_tracefork_flag = 1;
4329 my_waitpid (second_pid, &second_status, 0);
4330 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4331 if (ret != 0)
4332 warning ("linux_test_for_tracefork: failed to kill second child");
4333 my_waitpid (second_pid, &status, 0);
4334 }
4335 }
4336 else
4337 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4338 "(%d, status 0x%x)", ret, status);
4339
4340 do
4341 {
4342 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4343 if (ret != 0)
4344 warning ("linux_test_for_tracefork: failed to kill child");
4345 my_waitpid (child_pid, &status, 0);
4346 }
4347 while (WIFSTOPPED (status));
4348
4349 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4350 free (stack);
4351 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4352 }
4353
4354
4355 static void
4356 linux_look_up_symbols (void)
4357 {
4358 #ifdef USE_THREAD_DB
4359 struct process_info *proc = current_process ();
4360
4361 if (proc->private->thread_db != NULL)
4362 return;
4363
4364 /* If the kernel supports tracing forks then it also supports tracing
4365 clones, and then we don't need to use the magic thread event breakpoint
4366 to learn about threads. */
4367 thread_db_init (!linux_supports_tracefork_flag);
4368 #endif
4369 }
4370
4371 static void
4372 linux_request_interrupt (void)
4373 {
4374 extern unsigned long signal_pid;
4375
4376 if (!ptid_equal (cont_thread, null_ptid)
4377 && !ptid_equal (cont_thread, minus_one_ptid))
4378 {
4379 struct lwp_info *lwp;
4380 int lwpid;
4381
4382 lwp = get_thread_lwp (current_inferior);
4383 lwpid = lwpid_of (lwp);
4384 kill_lwp (lwpid, SIGINT);
4385 }
4386 else
4387 kill_lwp (signal_pid, SIGINT);
4388 }
4389
4390 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4391 to debugger memory starting at MYADDR. */
4392
4393 static int
4394 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4395 {
4396 char filename[PATH_MAX];
4397 int fd, n;
4398 int pid = lwpid_of (get_thread_lwp (current_inferior));
4399
4400 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4401
4402 fd = open (filename, O_RDONLY);
4403 if (fd < 0)
4404 return -1;
4405
4406 if (offset != (CORE_ADDR) 0
4407 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4408 n = -1;
4409 else
4410 n = read (fd, myaddr, len);
4411
4412 close (fd);
4413
4414 return n;
4415 }
4416
4417 /* These breakpoint- and watchpoint-related wrapper functions simply
4418 pass the call on to the low target if it has registered a
4419 corresponding function. */
4420
4421 static int
4422 linux_insert_point (char type, CORE_ADDR addr, int len)
4423 {
4424 if (the_low_target.insert_point != NULL)
4425 return the_low_target.insert_point (type, addr, len);
4426 else
4427 /* Unsupported (see target.h). */
4428 return 1;
4429 }
4430
4431 static int
4432 linux_remove_point (char type, CORE_ADDR addr, int len)
4433 {
4434 if (the_low_target.remove_point != NULL)
4435 return the_low_target.remove_point (type, addr, len);
4436 else
4437 /* Unsupported (see target.h). */
4438 return 1;
4439 }
4440
4441 static int
4442 linux_stopped_by_watchpoint (void)
4443 {
4444 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4445
4446 return lwp->stopped_by_watchpoint;
4447 }
4448
4449 static CORE_ADDR
4450 linux_stopped_data_address (void)
4451 {
4452 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4453
4454 return lwp->stopped_data_address;
4455 }
4456
4457 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4458 #if defined(__mcoldfire__)
4459 /* These should really be defined in the kernel's ptrace.h header. */
4460 #define PT_TEXT_ADDR 49*4
4461 #define PT_DATA_ADDR 50*4
4462 #define PT_TEXT_END_ADDR 51*4
4463 #elif defined(BFIN)
4464 #define PT_TEXT_ADDR 220
4465 #define PT_TEXT_END_ADDR 224
4466 #define PT_DATA_ADDR 228
4467 #elif defined(__TMS320C6X__)
4468 #define PT_TEXT_ADDR (0x10000*4)
4469 #define PT_DATA_ADDR (0x10004*4)
4470 #define PT_TEXT_END_ADDR (0x10008*4)
4471 #endif
4472
4473 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4474 to tell gdb about. */
4475
4476 static int
4477 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4478 {
4479 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4480 unsigned long text, text_end, data;
4481 int pid = lwpid_of (get_thread_lwp (current_inferior));
4482
4483 errno = 0;
4484
4485 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4486 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4487 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4488
4489 if (errno == 0)
4490 {
4491 /* Both text and data offsets produced at compile-time (and so
4492 used by gdb) are relative to the beginning of the program,
4493 with the data segment immediately following the text segment.
4494 However, the actual runtime layout in memory may put the data
4495 somewhere else, so when we send gdb a data base-address, we
4496 use the real data base address and subtract the compile-time
4497 data base-address from it (which is just the length of the
4498 text segment). BSS immediately follows data in both
4499 cases. */
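/* Editor's note: e.g. text loaded at 0x8000000 with text_end at
   0x8004000 and data at 0x8100000 yields *data_p == 0x80fc000; gdb
   then adds its compile-time data offset (0x4000 here) and lands on
   the real data segment.  */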
4500 *text_p = text;
4501 *data_p = data - (text_end - text);
4502
4503 return 1;
4504 }
4505 #endif
4506 return 0;
4507 }
4508 #endif
4509
4510 static int
4511 linux_qxfer_osdata (const char *annex,
4512 unsigned char *readbuf, unsigned const char *writebuf,
4513 CORE_ADDR offset, int len)
4514 {
4515 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4516 }
4517
4518 /* Convert a native/host siginfo object into/from the siginfo in the
4519 layout of the inferior's architecture. */
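/* DIRECTION is 1 when converting from the inferior's layout to the
   native layout, and 0 for the opposite conversion.  */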
4520
4521 static void
4522 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4523 {
4524 int done = 0;
4525
4526 if (the_low_target.siginfo_fixup != NULL)
4527 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4528
4529 /* If there was no callback, or the callback didn't do anything,
4530 then just do a straight memcpy. */
4531 if (!done)
4532 {
4533 if (direction == 1)
4534 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4535 else
4536 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4537 }
4538 }
4539
4540 static int
4541 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4542 unsigned const char *writebuf, CORE_ADDR offset, int len)
4543 {
4544 int pid;
4545 struct siginfo siginfo;
4546 char inf_siginfo[sizeof (struct siginfo)];
4547
4548 if (current_inferior == NULL)
4549 return -1;
4550
4551 pid = lwpid_of (get_thread_lwp (current_inferior));
4552
4553 if (debug_threads)
4554 fprintf (stderr, "%s siginfo for lwp %d.\n",
4555 readbuf != NULL ? "Reading" : "Writing",
4556 pid);
4557
4558 if (offset >= sizeof (siginfo))
4559 return -1;
4560
4561 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4562 return -1;
4563
4564 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4565 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4566 inferior with a 64-bit GDBSERVER should look the same as debugging it
4567 with a 32-bit GDBSERVER, we need to convert it. */
4568 siginfo_fixup (&siginfo, inf_siginfo, 0);
4569
4570 if (offset + len > sizeof (siginfo))
4571 len = sizeof (siginfo) - offset;
4572
4573 if (readbuf != NULL)
4574 memcpy (readbuf, inf_siginfo + offset, len);
4575 else
4576 {
4577 memcpy (inf_siginfo + offset, writebuf, len);
4578
4579 /* Convert back to ptrace layout before flushing it out. */
4580 siginfo_fixup (&siginfo, inf_siginfo, 1);
4581
4582 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4583 return -1;
4584 }
4585
4586 return len;
4587 }
4588
4589 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
4590 it lets us notice when children change state, and it acts as the
4591 handler for the sigsuspend in my_waitpid. */
4592
4593 static void
4594 sigchld_handler (int signo)
4595 {
4596 int old_errno = errno;
4597
4598 if (debug_threads)
4599 {
4600 do
4601 {
4602 /* fprintf is not async-signal-safe, so call write
4603 directly. */
4604 if (write (2, "sigchld_handler\n",
4605 sizeof ("sigchld_handler\n") - 1) < 0)
4606 break; /* just ignore */
4607 } while (0);
4608 }
4609
4610 if (target_is_async_p ())
4611 async_file_mark (); /* trigger a linux_wait */
4612
4613 errno = old_errno;
4614 }
4615
4616 static int
4617 linux_supports_non_stop (void)
4618 {
4619 return 1;
4620 }
4621
4622 static int
4623 linux_async (int enable)
4624 {
4625 int previous = (linux_event_pipe[0] != -1);
4626
4627 if (debug_threads)
4628 fprintf (stderr, "linux_async (%d), previous=%d\n",
4629 enable, previous);
4630
4631 if (previous != enable)
4632 {
4633 sigset_t mask;
4634 sigemptyset (&mask);
4635 sigaddset (&mask, SIGCHLD);
4636
4637 sigprocmask (SIG_BLOCK, &mask, NULL);
4638
4639 if (enable)
4640 {
4641 if (pipe (linux_event_pipe) == -1)
4642 fatal ("creating event pipe failed.");
4643
4644 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4645 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4646
4647 /* Register the event loop handler. */
4648 add_file_handler (linux_event_pipe[0],
4649 handle_target_event, NULL);
4650
4651 /* Always trigger a linux_wait. */
4652 async_file_mark ();
4653 }
4654 else
4655 {
4656 delete_file_handler (linux_event_pipe[0]);
4657
4658 close (linux_event_pipe[0]);
4659 close (linux_event_pipe[1]);
4660 linux_event_pipe[0] = -1;
4661 linux_event_pipe[1] = -1;
4662 }
4663
4664 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4665 }
4666
4667 return previous;
4668 }
4669
4670 static int
4671 linux_start_non_stop (int nonstop)
4672 {
4673 /* Register or unregister from event-loop accordingly. */
4674 linux_async (nonstop);
4675 return 0;
4676 }
4677
4678 static int
4679 linux_supports_multi_process (void)
4680 {
4681 return 1;
4682 }
4683
4684 static int
4685 linux_supports_disable_randomization (void)
4686 {
4687 #ifdef HAVE_PERSONALITY
4688 return 1;
4689 #else
4690 return 0;
4691 #endif
4692 }
4693
4694 /* Enumerate spufs IDs for process PID. */
4695 static int
4696 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4697 {
4698 int pos = 0;
4699 int written = 0;
4700 char path[128];
4701 DIR *dir;
4702 struct dirent *entry;
4703
4704 sprintf (path, "/proc/%ld/fd", pid);
4705 dir = opendir (path);
4706 if (!dir)
4707 return -1;
4708
4709 rewinddir (dir);
4710 while ((entry = readdir (dir)) != NULL)
4711 {
4712 struct stat st;
4713 struct statfs stfs;
4714 int fd;
4715
4716 fd = atoi (entry->d_name);
4717 if (!fd)
4718 continue;
4719
4720 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4721 if (stat (path, &st) != 0)
4722 continue;
4723 if (!S_ISDIR (st.st_mode))
4724 continue;
4725
4726 if (statfs (path, &stfs) != 0)
4727 continue;
4728 if (stfs.f_type != SPUFS_MAGIC)
4729 continue;
4730
4731 if (pos >= offset && pos + 4 <= offset + len)
4732 {
4733 *(unsigned int *)(buf + pos - offset) = fd;
4734 written += 4;
4735 }
4736 pos += 4;
4737 }
4738
4739 closedir (dir);
4740 return written;
4741 }
4742
4743 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4744 object type, using the /proc file system. */
4745 static int
4746 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4747 unsigned const char *writebuf,
4748 CORE_ADDR offset, int len)
4749 {
4750 long pid = lwpid_of (get_thread_lwp (current_inferior));
4751 char buf[128];
4752 int fd = 0;
4753 int ret = 0;
4754
4755 if (!writebuf && !readbuf)
4756 return -1;
4757
4758 if (!*annex)
4759 {
4760 if (!readbuf)
4761 return -1;
4762 else
4763 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4764 }
4765
4766 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4767 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4768 if (fd <= 0)
4769 return -1;
4770
4771 if (offset != 0
4772 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4773 {
4774 close (fd);
4775 return 0;
4776 }
4777
4778 if (writebuf)
4779 ret = write (fd, writebuf, (size_t) len);
4780 else
4781 ret = read (fd, readbuf, (size_t) len);
4782
4783 close (fd);
4784 return ret;
4785 }
4786
4787 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4788 struct target_loadseg
4789 {
4790 /* Core address to which the segment is mapped. */
4791 Elf32_Addr addr;
4792 /* VMA recorded in the program header. */
4793 Elf32_Addr p_vaddr;
4794 /* Size of this segment in memory. */
4795 Elf32_Word p_memsz;
4796 };
4797
4798 # if defined PT_GETDSBT
4799 struct target_loadmap
4800 {
4801 /* Protocol version number, must be zero. */
4802 Elf32_Word version;
4803 /* Pointer to the DSBT table, its size, and the DSBT index. */
4804 unsigned *dsbt_table;
4805 unsigned dsbt_size, dsbt_index;
4806 /* Number of segments in this map. */
4807 Elf32_Word nsegs;
4808 /* The actual memory map. */
4809 struct target_loadseg segs[/*nsegs*/];
4810 };
4811 # define LINUX_LOADMAP PT_GETDSBT
4812 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4813 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4814 # else
4815 struct target_loadmap
4816 {
4817 /* Protocol version number, must be zero. */
4818 Elf32_Half version;
4819 /* Number of segments in this map. */
4820 Elf32_Half nsegs;
4821 /* The actual memory map. */
4822 struct target_loadseg segs[/*nsegs*/];
4823 };
4824 # define LINUX_LOADMAP PTRACE_GETFDPIC
4825 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4826 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4827 # endif
4828
4829 static int
4830 linux_read_loadmap (const char *annex, CORE_ADDR offset,
4831 unsigned char *myaddr, unsigned int len)
4832 {
4833 int pid = lwpid_of (get_thread_lwp (current_inferior));
4834 int addr = -1;
4835 struct target_loadmap *data = NULL;
4836 unsigned int actual_length, copy_length;
4837
4838 if (strcmp (annex, "exec") == 0)
4839 addr = (int) LINUX_LOADMAP_EXEC;
4840 else if (strcmp (annex, "interp") == 0)
4841 addr = (int) LINUX_LOADMAP_INTERP;
4842 else
4843 return -1;
4844
4845 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
4846 return -1;
4847
4848 if (data == NULL)
4849 return -1;
4850
4851 actual_length = sizeof (struct target_loadmap)
4852 + sizeof (struct target_loadseg) * data->nsegs;
4853
4854 if (offset > actual_length)
4855 return -1;
4856
4857 copy_length = actual_length - offset < len ? actual_length - offset : len;
4858 memcpy (myaddr, (char *) data + offset, copy_length);
4859 return copy_length;
4860 }
4861 #else
4862 # define linux_read_loadmap NULL
4863 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
4864
4865 static void
4866 linux_process_qsupported (const char *query)
4867 {
4868 if (the_low_target.process_qsupported != NULL)
4869 the_low_target.process_qsupported (query);
4870 }
4871
4872 static int
4873 linux_supports_tracepoints (void)
4874 {
4875 if (*the_low_target.supports_tracepoints == NULL)
4876 return 0;
4877
4878 return (*the_low_target.supports_tracepoints) ();
4879 }
4880
4881 static CORE_ADDR
4882 linux_read_pc (struct regcache *regcache)
4883 {
4884 if (the_low_target.get_pc == NULL)
4885 return 0;
4886
4887 return (*the_low_target.get_pc) (regcache);
4888 }
4889
4890 static void
4891 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4892 {
4893 gdb_assert (the_low_target.set_pc != NULL);
4894
4895 (*the_low_target.set_pc) (regcache, pc);
4896 }
4897
4898 static int
4899 linux_thread_stopped (struct thread_info *thread)
4900 {
4901 return get_thread_lwp (thread)->stopped;
4902 }
4903
4904 /* This exposes stop-all-threads functionality to other modules. */
4905
4906 static void
4907 linux_pause_all (int freeze)
4908 {
4909 stop_all_lwps (freeze, NULL);
4910 }
4911
4912 /* This exposes unstop-all-threads functionality to other gdbserver
4913 modules. */
4914
4915 static void
4916 linux_unpause_all (int unfreeze)
4917 {
4918 unstop_all_lwps (unfreeze, NULL);
4919 }
4920
4921 static int
4922 linux_prepare_to_access_memory (void)
4923 {
4924 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4925 running LWP. */
4926 if (non_stop)
4927 linux_pause_all (1);
4928 return 0;
4929 }
4930
4931 static void
4932 linux_done_accessing_memory (void)
4933 {
4934 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4935 running LWP. */
4936 if (non_stop)
4937 linux_unpause_all (1);
4938 }
4939
4940 static int
4941 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
4942 CORE_ADDR collector,
4943 CORE_ADDR lockaddr,
4944 ULONGEST orig_size,
4945 CORE_ADDR *jump_entry,
4946 CORE_ADDR *trampoline,
4947 ULONGEST *trampoline_size,
4948 unsigned char *jjump_pad_insn,
4949 ULONGEST *jjump_pad_insn_size,
4950 CORE_ADDR *adjusted_insn_addr,
4951 CORE_ADDR *adjusted_insn_addr_end,
4952 char *err)
4953 {
4954 return (*the_low_target.install_fast_tracepoint_jump_pad)
4955 (tpoint, tpaddr, collector, lockaddr, orig_size,
4956 jump_entry, trampoline, trampoline_size,
4957 jjump_pad_insn, jjump_pad_insn_size,
4958 adjusted_insn_addr, adjusted_insn_addr_end,
4959 err);
4960 }
4961
4962 static struct emit_ops *
4963 linux_emit_ops (void)
4964 {
4965 if (the_low_target.emit_ops != NULL)
4966 return (*the_low_target.emit_ops) ();
4967 else
4968 return NULL;
4969 }
4970
4971 static int
4972 linux_get_min_fast_tracepoint_insn_len (void)
4973 {
4974 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
4975 }
4976
4977 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
4978
4979 static int
4980 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
4981 CORE_ADDR *phdr_memaddr, int *num_phdr)
4982 {
4983 char filename[PATH_MAX];
4984 int fd;
4985 const int auxv_size = is_elf64
4986 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
4987 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
4988
4989 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4990
4991 fd = open (filename, O_RDONLY);
4992 if (fd < 0)
4993 return 1;
4994
4995 *phdr_memaddr = 0;
4996 *num_phdr = 0;
4997 while (read (fd, buf, auxv_size) == auxv_size
4998 && (*phdr_memaddr == 0 || *num_phdr == 0))
4999 {
5000 if (is_elf64)
5001 {
5002 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5003
5004 switch (aux->a_type)
5005 {
5006 case AT_PHDR:
5007 *phdr_memaddr = aux->a_un.a_val;
5008 break;
5009 case AT_PHNUM:
5010 *num_phdr = aux->a_un.a_val;
5011 break;
5012 }
5013 }
5014 else
5015 {
5016 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5017
5018 switch (aux->a_type)
5019 {
5020 case AT_PHDR:
5021 *phdr_memaddr = aux->a_un.a_val;
5022 break;
5023 case AT_PHNUM:
5024 *num_phdr = aux->a_un.a_val;
5025 break;
5026 }
5027 }
5028 }
5029
5030 close (fd);
5031
5032 if (*phdr_memaddr == 0 || *num_phdr == 0)
5033 {
5034 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5035 "phdr_memaddr = %ld, phdr_num = %d",
5036 (long) *phdr_memaddr, *num_phdr);
5037 return 2;
5038 }
5039
5040 return 0;
5041 }
5042
5043 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5044
5045 static CORE_ADDR
5046 get_dynamic (const int pid, const int is_elf64)
5047 {
5048 CORE_ADDR phdr_memaddr, relocation;
5049 int num_phdr, i;
5050 unsigned char *phdr_buf;
5051 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5052
5053 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5054 return 0;
5055
5056 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5057 phdr_buf = alloca (num_phdr * phdr_size);
5058
5059 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5060 return 0;
5061
5062 /* Compute relocation: it is expected to be 0 for "regular" executables,
5063 non-zero for PIE ones. */
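/* Editor's note: e.g. if PT_PHDR claims p_vaddr 0x40 but the program
   headers were actually mapped at 0x555555554040, relocation becomes
   0x555555554000, which is then added to PT_DYNAMIC's p_vaddr below.  */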
5064 relocation = -1;
5065 for (i = 0; relocation == -1 && i < num_phdr; i++)
5066 if (is_elf64)
5067 {
5068 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5069
5070 if (p->p_type == PT_PHDR)
5071 relocation = phdr_memaddr - p->p_vaddr;
5072 }
5073 else
5074 {
5075 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5076
5077 if (p->p_type == PT_PHDR)
5078 relocation = phdr_memaddr - p->p_vaddr;
5079 }
5080
5081 if (relocation == -1)
5082 {
5083 warning ("Unexpected missing PT_PHDR");
5084 return 0;
5085 }
5086
5087 for (i = 0; i < num_phdr; i++)
5088 {
5089 if (is_elf64)
5090 {
5091 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5092
5093 if (p->p_type == PT_DYNAMIC)
5094 return p->p_vaddr + relocation;
5095 }
5096 else
5097 {
5098 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5099
5100 if (p->p_type == PT_DYNAMIC)
5101 return p->p_vaddr + relocation;
5102 }
5103 }
5104
5105 return 0;
5106 }
5107
5108 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5109 can be 0 if the inferior does not yet have the library list initialized. */
5110
5111 static CORE_ADDR
5112 get_r_debug (const int pid, const int is_elf64)
5113 {
5114 CORE_ADDR dynamic_memaddr;
5115 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5116 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5117
5118 dynamic_memaddr = get_dynamic (pid, is_elf64);
5119 if (dynamic_memaddr == 0)
5120 return (CORE_ADDR) -1;
5121
5122 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5123 {
5124 if (is_elf64)
5125 {
5126 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5127
5128 if (dyn->d_tag == DT_DEBUG)
5129 return dyn->d_un.d_val;
5130
5131 if (dyn->d_tag == DT_NULL)
5132 break;
5133 }
5134 else
5135 {
5136 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5137
5138 if (dyn->d_tag == DT_DEBUG)
5139 return dyn->d_un.d_val;
5140
5141 if (dyn->d_tag == DT_NULL)
5142 break;
5143 }
5144
5145 dynamic_memaddr += dyn_size;
5146 }
5147
5148 return (CORE_ADDR) -1;
5149 }
5150
5151 /* Read one pointer from MEMADDR in the inferior. */
5152
5153 static int
5154 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5155 {
5156 *ptr = 0;
5157 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5158 }
5159
5160 struct link_map_offsets
5161 {
5162 /* Offset and size of r_debug.r_version. */
5163 int r_version_offset;
5164
5165 /* Offset and size of r_debug.r_map. */
5166 int r_map_offset;
5167
5168 /* Offset to l_addr field in struct link_map. */
5169 int l_addr_offset;
5170
5171 /* Offset to l_name field in struct link_map. */
5172 int l_name_offset;
5173
5174 /* Offset to l_ld field in struct link_map. */
5175 int l_ld_offset;
5176
5177 /* Offset to l_next field in struct link_map. */
5178 int l_next_offset;
5179
5180 /* Offset to l_prev field in struct link_map. */
5181 int l_prev_offset;
5182 };
5183
5184 /* Construct qXfer:libraries:read reply. */
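
/* Editor's note: the document built below looks like this (addresses
   abridged):

     <library-list-svr4 version="1.0" main-lm="0x...">
       <library name="/lib/libc.so.6" lm="0x..." l_addr="0x..." l_ld="0x..."/>
     </library-list-svr4>

   The main-lm attribute is emitted only when the first link map entry
   has an empty name, which is the case for the main executable.  */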
5185
5186 static int
5187 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5188 unsigned const char *writebuf,
5189 CORE_ADDR offset, int len)
5190 {
5191 char *document;
5192 unsigned document_len;
5193 struct process_info_private *const priv = current_process ()->private;
5194 char filename[PATH_MAX];
5195 int pid, is_elf64;
5196
5197 static const struct link_map_offsets lmo_32bit_offsets =
5198 {
5199 0, /* r_version offset. */
5200 4, /* r_debug.r_map offset. */
5201 0, /* l_addr offset in link_map. */
5202 4, /* l_name offset in link_map. */
5203 8, /* l_ld offset in link_map. */
5204 12, /* l_next offset in link_map. */
5205 16 /* l_prev offset in link_map. */
5206 };
5207
5208 static const struct link_map_offsets lmo_64bit_offsets =
5209 {
5210 0, /* r_version offset. */
5211 8, /* r_debug.r_map offset. */
5212 0, /* l_addr offset in link_map. */
5213 8, /* l_name offset in link_map. */
5214 16, /* l_ld offset in link_map. */
5215 24, /* l_next offset in link_map. */
5216 32 /* l_prev offset in link_map. */
5217 };
5218 const struct link_map_offsets *lmo;
5219
5220 if (writebuf != NULL)
5221 return -2;
5222 if (readbuf == NULL)
5223 return -1;
5224
5225 pid = lwpid_of (get_thread_lwp (current_inferior));
5226 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5227 is_elf64 = elf_64_file_p (filename);
5228 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5229
5230 if (priv->r_debug == 0)
5231 priv->r_debug = get_r_debug (pid, is_elf64);
5232
5233 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5234 {
5235 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5236 }
5237 else
5238 {
5239 int allocated = 1024;
5240 char *p;
5241 const int ptr_size = is_elf64 ? 8 : 4;
5242 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5243 int r_version, header_done = 0;
5244
5245 document = xmalloc (allocated);
5246 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5247 p = document + strlen (document);
5248
5249 r_version = 0;
5250 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5251 (unsigned char *) &r_version,
5252 sizeof (r_version)) != 0
5253 || r_version != 1)
5254 {
5255 warning ("unexpected r_debug version %d", r_version);
5256 goto done;
5257 }
5258
5259 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5260 &lm_addr, ptr_size) != 0)
5261 {
5262 warning ("unable to read r_map from 0x%lx",
5263 (long) (priv->r_debug + lmo->r_map_offset));
5264 goto done;
5265 }
5266
5267 lm_prev = 0;
5268 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5269 &l_name, ptr_size) == 0
5270 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5271 &l_addr, ptr_size) == 0
5272 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5273 &l_ld, ptr_size) == 0
5274 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5275 &l_prev, ptr_size) == 0
5276 && read_one_ptr (lm_addr + lmo->l_next_offset,
5277 &l_next, ptr_size) == 0)
5278 {
5279 unsigned char libname[PATH_MAX];
5280
5281 if (lm_prev != l_prev)
5282 {
5283 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5284 (long) lm_prev, (long) l_prev);
5285 break;
5286 }
5287
5288 /* Not checking for error because reading may stop before
5289 we've got PATH_MAX worth of characters. */
5290 libname[0] = '\0';
5291 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5292 libname[sizeof (libname) - 1] = '\0';
5293 if (libname[0] != '\0')
5294 {
5295 /* 6x the size for xml_escape_text below. */
5296 size_t len = 6 * strlen ((char *) libname);
5297 char *name;
5298
5299 if (!header_done)
5300 {
5301 /* Terminate `<library-list-svr4'. */
5302 *p++ = '>';
5303 header_done = 1;
5304 }
5305
5306 while (allocated < p - document + len + 200)
5307 {
5308 /* Expand to guarantee sufficient storage. */
5309 uintptr_t document_len = p - document;
5310
5311 document = xrealloc (document, 2 * allocated);
5312 allocated *= 2;
5313 p = document + document_len;
5314 }
5315
5316 name = xml_escape_text ((char *) libname);
5317 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5318 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5319 name, (unsigned long) lm_addr,
5320 (unsigned long) l_addr, (unsigned long) l_ld);
5321 free (name);
5322 }
5323 else if (lm_prev == 0)
5324 {
5325 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5326 p = p + strlen (p);
5327 }
5328
5329 if (l_next == 0)
5330 break;
5331
5332 lm_prev = lm_addr;
5333 lm_addr = l_next;
5334 }
5335 done:
5336 strcpy (p, "</library-list-svr4>");
5337 }
5338
5339 document_len = strlen (document);
5340 if (offset < document_len)
5341 document_len -= offset;
5342 else
5343 document_len = 0;
5344 if (len > document_len)
5345 len = document_len;
5346
5347 memcpy (readbuf, document + offset, len);
5348 xfree (document);
5349
5350 return len;
5351 }
5352
5353 static struct target_ops linux_target_ops = {
5354 linux_create_inferior,
5355 linux_attach,
5356 linux_kill,
5357 linux_detach,
5358 linux_mourn,
5359 linux_join,
5360 linux_thread_alive,
5361 linux_resume,
5362 linux_wait,
5363 linux_fetch_registers,
5364 linux_store_registers,
5365 linux_prepare_to_access_memory,
5366 linux_done_accessing_memory,
5367 linux_read_memory,
5368 linux_write_memory,
5369 linux_look_up_symbols,
5370 linux_request_interrupt,
5371 linux_read_auxv,
5372 linux_insert_point,
5373 linux_remove_point,
5374 linux_stopped_by_watchpoint,
5375 linux_stopped_data_address,
5376 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5377 linux_read_offsets,
5378 #else
5379 NULL,
5380 #endif
5381 #ifdef USE_THREAD_DB
5382 thread_db_get_tls_address,
5383 #else
5384 NULL,
5385 #endif
5386 linux_qxfer_spu,
5387 hostio_last_error_from_errno,
5388 linux_qxfer_osdata,
5389 linux_xfer_siginfo,
5390 linux_supports_non_stop,
5391 linux_async,
5392 linux_start_non_stop,
5393 linux_supports_multi_process,
5394 #ifdef USE_THREAD_DB
5395 thread_db_handle_monitor_command,
5396 #else
5397 NULL,
5398 #endif
5399 linux_common_core_of_thread,
5400 linux_read_loadmap,
5401 linux_process_qsupported,
5402 linux_supports_tracepoints,
5403 linux_read_pc,
5404 linux_write_pc,
5405 linux_thread_stopped,
5406 NULL,
5407 linux_pause_all,
5408 linux_unpause_all,
5409 linux_cancel_breakpoints,
5410 linux_stabilize_threads,
5411 linux_install_fast_tracepoint_jump_pad,
5412 linux_emit_ops,
5413 linux_supports_disable_randomization,
5414 linux_get_min_fast_tracepoint_insn_len,
5415 linux_qxfer_libraries_svr4,
5416 };
5417
5418 static void
5419 linux_init_signals (void)
5420 {
5421 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5422 to find what the cancel signal actually is. */
5423 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5424 signal (__SIGRTMIN+1, SIG_IGN);
5425 #endif
5426 }
5427
5428 void
5429 initialize_low (void)
5430 {
5431 struct sigaction sigchld_action;
5432 memset (&sigchld_action, 0, sizeof (sigchld_action));
5433 set_target_ops (&linux_target_ops);
5434 set_breakpoint_data (the_low_target.breakpoint,
5435 the_low_target.breakpoint_len);
5436 linux_init_signals ();
5437 linux_test_for_tracefork ();
5438 #ifdef HAVE_LINUX_REGSETS
5439 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5440 ;
5441 disabled_regsets = xmalloc (num_regsets);
5442 #endif
5443
5444 sigchld_action.sa_handler = sigchld_handler;
5445 sigemptyset (&sigchld_action.sa_mask);
5446 sigchld_action.sa_flags = SA_RESTART;
5447 sigaction (SIGCHLD, &sigchld_action, NULL);
5448 }