Linux: No need to set ptrace event options in fork/clone children.
gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include "gdb_wait.h"
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "gdb_stat.h"
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
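
/* For reference (expository note): W_STOPCODE composes a raw wait
   status of the kind waitpid reports for a stopped child.  E.g.
   W_STOPCODE (SIGTRAP) is (5 << 8) | 0x7f == 0x57f, for which
   WIFSTOPPED is true and WSTOPSIG yields SIGTRAP again.  */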

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
/* PTRACE_TEXT_ADDR and friends.  */
#include <asm/ptrace.h>
#define HAS_NOMMU
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
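
/* Usage sketch for the two helpers above (PID and status values are
   hypothetical): record a stop we cannot yet match to a known LWP,
   then claim it once the parent's clone event arrives.

       int status;
       add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
       ...
       if (pull_pid_from_list (&stopped_pids, 1234, &status))
	 handle the saved wait status; the list node has been freed.  */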

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[MAXPATHLEN];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
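
/* Usage sketch (hypothetical PID): callers would use this to pick a
   32-bit vs. 64-bit register layout for an inferior, e.g.:

       unsigned int machine;
       if (linux_pid_exe_is_elf_64_file (1234, &machine) == 1)
	 use 64-bit layouts; MACHINE holds the ELF e_machine value.  */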

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
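
/* Note on the emulation above: when __WALL must be emulated, the loop
   alternates WNOHANG polls between the two child flavors (plain
   waitpid and __WCLONE), and only blocks in sigsuspend once both
   flavors have been polled empty, so a SIGCHLD from either kind of
   child wakes it up to poll again.  */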

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_ARG3_TYPE) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
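
/* Note: the new LWP added above is not given PTRACE_SETOPTIONS here.
   When a clone is reported via PTRACE_EVENT_CLONE, the kernel attaches
   us to the child with the parent's ptrace event options already in
   effect, so setting them again is unnecessary; per the commit
   subject, that is the behavior this change relies on.  */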

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
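
/* Worked example of the adjustment above, on x86 (decr_pc_after_break
   == 1, addresses hypothetical): a breakpoint planted at 0x400123 is
   the one-byte int3 instruction, so when it fires the kernel reports
   $eip == 0x400124; get_stop_pc backs that up to 0x400123, the
   breakpoint address.  */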

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
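
/* Sequence note: because the child calls PTRACE_TRACEME before exec,
   the kernel stops it with a SIGTRAP when the exec completes.  The
   first wait on the new pid collects that stop, and only then
   (via must_set_ptrace_flags, checked in linux_wait_for_event) are
   the ptrace event options enabled on it.  */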

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0)
      != 0)
    {
      struct buffer buffer;

      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
	     lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	fprintf (stderr,
		 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  unsigned long lwp;
	  int new_threads_found;
	  int iterations = 0;
	  struct dirent *dp;

	  while (iterations < 2)
	    {
	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  /* Is this a new thread?  */
		  if (lwp
		      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
		    {
		      linux_attach_lwp_1 (lwp, 0);
		      new_threads_found++;

		      if (debug_threads)
			fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* Don't dereference LWP here; it is NULL.  */
      if (debug_threads)
	fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
		 pid);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
		 lwpid_of (lwp), pid);

      do
	{
	  linux_kill_one_lwp (lwp);

	  /* Make sure it died.  The loop is most likely unnecessary.  */
	  lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
	} while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s hasn't stopped: no pending signal\n",
		 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had stopped with extended "
		 "status: no pending signal\n",
		 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had signal %s, but it is in nopass state\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had signal %s, "
		 "but we don't know if we should pass it.  Default to not.\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s has pending signal %s: delivering it.\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
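
/* Decision summary for the function above: a signal is re-delivered
   on detach only if the LWP is in a genuine signal stop (not an
   extended ptrace event), and GDB's pass/nopass settings (or the
   SIGTRAP/SIGINT default, when those settings are unknown) allow
   passing it; every other path returns 0 and the signal is dropped.  */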

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Sending SIGCONT to %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
	      (PTRACE_ARG4_TYPE) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (lwp)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
			 lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
			     (PTRACE_ARG3_TYPE) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
			  (PTRACE_ARG3_TYPE) 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	fprintf (stderr,
		 "   Already queued %d\n",
		 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Not requeuing already queued non-RT signal %d"
			 " for LWP %ld\n",
			 sig->signal,
			 lwpid_of (lwp));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
		 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    fprintf (stderr,
		     "   Still queued %d\n",
		     sig->signal);

	  fprintf (stderr, "   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
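
/* Pairing note for the queue above: storage is LIFO (enqueue pushes
   at the head) but delivery is FIFO, since the dequeue walks to the
   oldest node (the one whose prev is NULL) before extracting it.
   Signals deferred while the thread was inside the jump pad are thus
   re-reported one per wait, in the order they originally arrived.  */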

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;
  ptid_t wait_ptid;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
	 with waitpid, so instead, we wait for any child, and leave
	 children we're not interested in right now with a pending
	 status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (wait_ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "WNOHANG set, no event found\n");
	  return 0;
	}

      if (event_child == NULL)
	error ("event from unknown child");

      if (ptid_is_pid (ptid)
	  && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
	{
	  if (! WIFSTOPPED (*wstat))
	    mark_lwp_dead (event_child, *wstat);
	  else
	    {
	      event_child->status_pending_p = 1;
	      event_child->status_pending = *wstat;
	    }
	  continue;
	}

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    {
	      int lwpid = lwpid_of (event_child);

	      /* Cancel the step-over operation --- the thread that
		 started it is gone.  */
	      if (finish_step_over (event_child))
		unstop_all_lwps (1, event_child);
	      delete_lwp (event_child);
	      return lwpid;
	    }

	  delete_lwp (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      if (event_child->must_set_ptrace_flags)
	{
	  linux_enable_event_reporting (lwpid_of (event_child));
	  event_child->must_set_ptrace_flags = 0;
	}

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  int should_stop;

	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;

	  should_stop = (current_inferior->last_resume_kind == resume_stop
			 || stopping_threads != NOT_STOPPING_THREADS);

	  if (!should_stop)
	    {
	      linux_resume_one_lwp (event_child,
				    event_child->stepping, 0, NULL);
	      continue;
	    }
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
2011
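/* Illustrative sketch, an editor's addition and not part of this
   file's build: how a caller might poll the interface above with
   WNOHANG, per the contract documented before linux_wait_for_event.
   The helper name is hypothetical.  */
#if 0
static void
example_poll_once (ptid_t ptid)
{
  int wstat;
  int pid = linux_wait_for_event (ptid, &wstat, __WALL | WNOHANG);

  if (pid == 0)
    return;			/* Nothing ready; try again later.  */

  if (WIFSTOPPED (wstat))
    fprintf (stderr, "LWP %d stopped with signal %d\n",
	     pid, WSTOPSIG (wstat));
  else
    fprintf (stderr, "LWP %d exited, raw status %04x\n", pid, wstat);
}
#endif
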
2012 /* Count the LWPs that have had events. */
2013
2014 static int
2015 count_events_callback (struct inferior_list_entry *entry, void *data)
2016 {
2017 struct lwp_info *lp = (struct lwp_info *) entry;
2018 struct thread_info *thread = get_lwp_thread (lp);
2019 int *count = data;
2020
2021 gdb_assert (count != NULL);
2022
2023 /* Count only resumed LWPs that have a SIGTRAP event pending that
2024 should be reported to GDB. */
2025 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2026 && thread->last_resume_kind != resume_stop
2027 && lp->status_pending_p
2028 && WIFSTOPPED (lp->status_pending)
2029 && WSTOPSIG (lp->status_pending) == SIGTRAP
2030 && !breakpoint_inserted_here (lp->stop_pc))
2031 (*count)++;
2032
2033 return 0;
2034 }
2035
2036 /* Select the LWP (if any) that is currently being single-stepped. */
2037
2038 static int
2039 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2040 {
2041 struct lwp_info *lp = (struct lwp_info *) entry;
2042 struct thread_info *thread = get_lwp_thread (lp);
2043
2044 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2045 && thread->last_resume_kind == resume_step
2046 && lp->status_pending_p)
2047 return 1;
2048 else
2049 return 0;
2050 }
2051
2052 /* Select the Nth LWP that has had a SIGTRAP event that should be
2053 reported to GDB. */
2054
2055 static int
2056 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2057 {
2058 struct lwp_info *lp = (struct lwp_info *) entry;
2059 struct thread_info *thread = get_lwp_thread (lp);
2060 int *selector = data;
2061
2062 gdb_assert (selector != NULL);
2063
2064 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2065 if (thread->last_resume_kind != resume_stop
2066 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2067 && lp->status_pending_p
2068 && WIFSTOPPED (lp->status_pending)
2069 && WSTOPSIG (lp->status_pending) == SIGTRAP
2070 && !breakpoint_inserted_here (lp->stop_pc))
2071 if ((*selector)-- == 0)
2072 return 1;
2073
2074 return 0;
2075 }
2076
2077 static int
2078 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2079 {
2080 struct lwp_info *lp = (struct lwp_info *) entry;
2081 struct thread_info *thread = get_lwp_thread (lp);
2082 struct lwp_info *event_lp = data;
2083
2084 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2085 if (lp == event_lp)
2086 return 0;
2087
2088 /* If a LWP other than the LWP that we're reporting an event for has
2089 hit a GDB breakpoint (as opposed to some random trap signal),
2090 then just arrange for it to hit it again later. We don't keep
2091 the SIGTRAP status and don't forward the SIGTRAP signal to the
2092 LWP. We will handle the current event; eventually we will resume
2093 all LWPs, and this one will get its breakpoint trap again.
2094
2095 If we do not do this, then we run the risk that the user will
2096 delete or disable the breakpoint, but the LWP will have already
2097 tripped on it. */
2098
2099 if (thread->last_resume_kind != resume_stop
2100 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2101 && lp->status_pending_p
2102 && WIFSTOPPED (lp->status_pending)
2103 && WSTOPSIG (lp->status_pending) == SIGTRAP
2104 && !lp->stepping
2105 && !lp->stopped_by_watchpoint
2106 && cancel_breakpoint (lp))
2107 /* Throw away the SIGTRAP. */
2108 lp->status_pending_p = 0;
2109
2110 return 0;
2111 }
2112
2113 static void
2114 linux_cancel_breakpoints (void)
2115 {
2116 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2117 }
2118
2119 /* Select one LWP out of those that have events pending. */
2120
2121 static void
2122 select_event_lwp (struct lwp_info **orig_lp)
2123 {
2124 int num_events = 0;
2125 int random_selector;
2126 struct lwp_info *event_lp;
2127
2128 /* Give preference to any LWP that is being single-stepped. */
2129 event_lp
2130 = (struct lwp_info *) find_inferior (&all_lwps,
2131 select_singlestep_lwp_callback, NULL);
2132 if (event_lp != NULL)
2133 {
2134 if (debug_threads)
2135 fprintf (stderr,
2136 "SEL: Select single-step %s\n",
2137 target_pid_to_str (ptid_of (event_lp)));
2138 }
2139 else
2140 {
2141 /* No single-stepping LWP. Select one at random, out of those
2142 which have had SIGTRAP events. */
2143
2144 /* First see how many SIGTRAP events we have. */
2145 find_inferior (&all_lwps, count_events_callback, &num_events);
2146
2147 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2148 random_selector = (int)
2149 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2150
2151 if (debug_threads && num_events > 1)
2152 fprintf (stderr,
2153 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2154 num_events, random_selector);
2155
2156 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2157 select_event_lwp_callback,
2158 &random_selector);
2159 }
2160
2161 if (event_lp != NULL)
2162 {
2163 /* Switch the event LWP. */
2164 *orig_lp = event_lp;
2165 }
2166 }
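
/* Illustrative sketch, an editor's addition and not compiled: the
   selector arithmetic above maps rand () uniformly onto
   [0, num_events).  Because rand () <= RAND_MAX, the quotient below
   is strictly less than NUM_EVENTS, so truncation never yields an
   out-of-range index; select_event_lwp_callback then counts that
   index down to zero.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int num_events = 3;
  int i;

  for (i = 0; i < 8; i++)
    {
      int sel = (int) ((num_events * (double) rand ())
		       / (RAND_MAX + 1.0));

      /* SEL is always 0, 1 or 2 here, never 3.  */
      printf ("selected #%d\n", sel);
    }
  return 0;
}
#endif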
2167
2168 /* Decrement the suspend count of an LWP. */
2169
2170 static int
2171 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2172 {
2173 struct lwp_info *lwp = (struct lwp_info *) entry;
2174
2175 /* Ignore EXCEPT. */
2176 if (lwp == except)
2177 return 0;
2178
2179 lwp->suspended--;
2180
2181 gdb_assert (lwp->suspended >= 0);
2182 return 0;
2183 }
2184
2185 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2186 non-NULL. */
2187
2188 static void
2189 unsuspend_all_lwps (struct lwp_info *except)
2190 {
2191 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2192 }
2193
2194 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2195 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2196 void *data);
2197 static int lwp_running (struct inferior_list_entry *entry, void *data);
2198 static ptid_t linux_wait_1 (ptid_t ptid,
2199 struct target_waitstatus *ourstatus,
2200 int target_options);
2201
2202 /* Stabilize threads (move out of jump pads).
2203
2204 If a thread is midway collecting a fast tracepoint, we need to
2205 finish the collection and move it out of the jump pad before
2206 reporting the signal.
2207
2208 This avoids recursion while collecting (when a signal arrives
2209 midway, and the signal handler itself collects), which would trash
2210 the trace buffer. In case the user set a breakpoint in a signal
2211 handler, this avoids the backtrace showing the jump pad, etc.
2212 Most importantly, there are certain things we can't do safely if
2213 threads are stopped in a jump pad (or in its callees). For
2214 example:
2215
2216 - starting a new trace run. A thread still collecting the
2217 previous run could trash the trace buffer when resumed. The trace
2218 buffer control structures would have been reset but the thread had
2219 no way to tell. The thread could even be midway through memcpy'ing
2220 to the buffer, which would mean that when resumed, it would clobber
2221 the trace buffer that had been set up for a new run.
2222
2223 - we can't rewrite/reuse the jump pads for new tracepoints
2224 safely. Say you do tstart while a thread is stopped midway
2225 through collecting. When the thread is later resumed, it finishes the
2226 collection, and returns to the jump pad, to execute the original
2227 instruction that was under the tracepoint jump at the time the
2228 older run had been started. If the jump pad had been rewritten
2229 since for something else in the new run, the thread would now
2230 execute the wrong / random instructions. */
2231
2232 static void
2233 linux_stabilize_threads (void)
2234 {
2235 struct thread_info *save_inferior;
2236 struct lwp_info *lwp_stuck;
2237
2238 lwp_stuck
2239 = (struct lwp_info *) find_inferior (&all_lwps,
2240 stuck_in_jump_pad_callback, NULL);
2241 if (lwp_stuck != NULL)
2242 {
2243 if (debug_threads)
2244 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2245 lwpid_of (lwp_stuck));
2246 return;
2247 }
2248
2249 save_inferior = current_inferior;
2250
2251 stabilizing_threads = 1;
2252
2253 /* Kick 'em all. */
2254 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2255
2256 /* Loop until all are stopped out of the jump pads. */
2257 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2258 {
2259 struct target_waitstatus ourstatus;
2260 struct lwp_info *lwp;
2261 int wstat;
2262
2263 /* Note that we go through the full wait event loop. While
2264 moving threads out of jump pad, we need to be able to step
2265 over internal breakpoints and such. */
2266 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2267
2268 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2269 {
2270 lwp = get_thread_lwp (current_inferior);
2271
2272 /* Lock it. */
2273 lwp->suspended++;
2274
2275 if (ourstatus.value.sig != GDB_SIGNAL_0
2276 || current_inferior->last_resume_kind == resume_stop)
2277 {
2278 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2279 enqueue_one_deferred_signal (lwp, &wstat);
2280 }
2281 }
2282 }
2283
2284 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2285
2286 stabilizing_threads = 0;
2287
2288 current_inferior = save_inferior;
2289
2290 if (debug_threads)
2291 {
2292 lwp_stuck
2293 = (struct lwp_info *) find_inferior (&all_lwps,
2294 stuck_in_jump_pad_callback, NULL);
2295 if (lwp_stuck != NULL)
2296 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2297 lwpid_of (lwp_stuck));
2298 }
2299 }
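
/* Illustrative sketch, an editor's addition and not compiled: the
   synthetic wait status built with W_STOPCODE above round-trips
   through the standard decoding macros, which is what
   enqueue_one_deferred_signal relies on.  */
#if 0
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

static void
example_w_stopcode_roundtrip (void)
{
  int wstat = W_STOPCODE (SIGINT);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGINT);
}
#endif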
2300
2301 /* Wait for process, returns status. */
2302
2303 static ptid_t
2304 linux_wait_1 (ptid_t ptid,
2305 struct target_waitstatus *ourstatus, int target_options)
2306 {
2307 int w;
2308 struct lwp_info *event_child;
2309 int options;
2310 int pid;
2311 int step_over_finished;
2312 int bp_explains_trap;
2313 int maybe_internal_trap;
2314 int report_to_gdb;
2315 int trace_event;
2316
2317 /* Translate generic target options into linux options. */
2318 options = __WALL;
2319 if (target_options & TARGET_WNOHANG)
2320 options |= WNOHANG;
2321
2322 retry:
2323 bp_explains_trap = 0;
2324 trace_event = 0;
2325 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2326
2327 /* If we were only supposed to resume one thread, only wait for
2328 that thread - if it's still alive. If it died, however - which
2329 can happen if we're coming from the thread death case below -
2330 then we need to make sure we restart the other threads. We could
2331 pick a thread at random or restart all; restarting all is less
2332 arbitrary. */
2333 if (!non_stop
2334 && !ptid_equal (cont_thread, null_ptid)
2335 && !ptid_equal (cont_thread, minus_one_ptid))
2336 {
2337 struct thread_info *thread;
2338
2339 thread = (struct thread_info *) find_inferior_id (&all_threads,
2340 cont_thread);
2341
2342 /* No stepping, no signal - unless one is pending already, of course. */
2343 if (thread == NULL)
2344 {
2345 struct thread_resume resume_info;
2346 resume_info.thread = minus_one_ptid;
2347 resume_info.kind = resume_continue;
2348 resume_info.sig = 0;
2349 linux_resume (&resume_info, 1);
2350 }
2351 else
2352 ptid = cont_thread;
2353 }
2354
2355 if (ptid_equal (step_over_bkpt, null_ptid))
2356 pid = linux_wait_for_event (ptid, &w, options);
2357 else
2358 {
2359 if (debug_threads)
2360 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2361 target_pid_to_str (step_over_bkpt));
2362 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2363 }
2364
2365 if (pid == 0) /* only if TARGET_WNOHANG */
2366 return null_ptid;
2367
2368 event_child = get_thread_lwp (current_inferior);
2369
2370 /* If we are waiting for a particular child, and it exited,
2371 linux_wait_for_event will return its exit status. Similarly if
2372 the last child exited. If this is not the last child, however,
2373 do not report it as exited until there is a 'thread exited' response
2374 available in the remote protocol. Instead, just wait for another event.
2375 This should be safe, because if the thread crashed we will already
2376 have reported the termination signal to GDB; that should stop any
2377 in-progress stepping operations, etc.
2378
2379 Report the exit status of the last thread to exit. This matches
2380 LinuxThreads' behavior. */
2381
2382 if (last_thread_of_process_p (current_inferior))
2383 {
2384 if (WIFEXITED (w) || WIFSIGNALED (w))
2385 {
2386 if (WIFEXITED (w))
2387 {
2388 ourstatus->kind = TARGET_WAITKIND_EXITED;
2389 ourstatus->value.integer = WEXITSTATUS (w);
2390
2391 if (debug_threads)
2392 fprintf (stderr,
2393 "\nChild exited with retcode = %x \n",
2394 WEXITSTATUS (w));
2395 }
2396 else
2397 {
2398 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2399 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2400
2401 if (debug_threads)
2402 fprintf (stderr,
2403 "\nChild terminated with signal = %x \n",
2404 WTERMSIG (w));
2405
2406 }
2407
2408 return ptid_of (event_child);
2409 }
2410 }
2411 else
2412 {
2413 if (!WIFSTOPPED (w))
2414 goto retry;
2415 }
2416
2417 /* If this event was not handled before, and is not a SIGTRAP, we
2418 report it. SIGILL and SIGSEGV are also treated as traps in case
2419 a breakpoint is inserted at the current PC. If this target does
2420 not support internal breakpoints at all, we also report the
2421 SIGTRAP without further processing; it's of no concern to us. */
2422 maybe_internal_trap
2423 = (supports_breakpoints ()
2424 && (WSTOPSIG (w) == SIGTRAP
2425 || ((WSTOPSIG (w) == SIGILL
2426 || WSTOPSIG (w) == SIGSEGV)
2427 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2428
2429 if (maybe_internal_trap)
2430 {
2431 /* Handle anything that requires bookkeeping before deciding to
2432 report the event or continue waiting. */
2433
2434 /* First check if we can explain the SIGTRAP with an internal
2435 breakpoint, or if we should possibly report the event to GDB.
2436 Do this before anything that may remove or insert a
2437 breakpoint. */
2438 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2439
2440 /* We have a SIGTRAP, possibly a step-over dance has just
2441 finished. If so, tweak the state machine accordingly,
2442 reinsert breakpoints and delete any reinsert (software
2443 single-step) breakpoints. */
2444 step_over_finished = finish_step_over (event_child);
2445
2446 /* Now invoke the callbacks of any internal breakpoints there. */
2447 check_breakpoints (event_child->stop_pc);
2448
2449 /* Handle tracepoint data collecting. This may overflow the
2450 trace buffer, and cause a tracing stop, removing
2451 breakpoints. */
2452 trace_event = handle_tracepoints (event_child);
2453
2454 if (bp_explains_trap)
2455 {
2456 /* If we stepped or ran into an internal breakpoint, we've
2457 already handled it. So next time we resume (from this
2458 PC), we should step over it. */
2459 if (debug_threads)
2460 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2461
2462 if (breakpoint_here (event_child->stop_pc))
2463 event_child->need_step_over = 1;
2464 }
2465 }
2466 else
2467 {
2468 /* We have some other signal, possibly a step-over dance was in
2469 progress, and it should be cancelled too. */
2470 step_over_finished = finish_step_over (event_child);
2471 }
2472
2473 /* We have all the data we need. Either report the event to GDB, or
2474 resume threads and keep waiting for more. */
2475
2476 /* If we're collecting a fast tracepoint, finish the collection and
2477 move out of the jump pad before delivering a signal. See
2478 linux_stabilize_threads. */
2479
2480 if (WIFSTOPPED (w)
2481 && WSTOPSIG (w) != SIGTRAP
2482 && supports_fast_tracepoints ()
2483 && agent_loaded_p ())
2484 {
2485 if (debug_threads)
2486 fprintf (stderr,
2487 "Got signal %d for LWP %ld. Check if we need "
2488 "to defer or adjust it.\n",
2489 WSTOPSIG (w), lwpid_of (event_child));
2490
2491 /* Allow debugging the jump pad itself. */
2492 if (current_inferior->last_resume_kind != resume_step
2493 && maybe_move_out_of_jump_pad (event_child, &w))
2494 {
2495 enqueue_one_deferred_signal (event_child, &w);
2496
2497 if (debug_threads)
2498 fprintf (stderr,
2499 "Signal %d for LWP %ld deferred (in jump pad)\n",
2500 WSTOPSIG (w), lwpid_of (event_child));
2501
2502 linux_resume_one_lwp (event_child, 0, 0, NULL);
2503 goto retry;
2504 }
2505 }
2506
2507 if (event_child->collecting_fast_tracepoint)
2508 {
2509 if (debug_threads)
2510 fprintf (stderr, "\
2511 LWP %ld was trying to move out of the jump pad (%d). \
2512 Check if we're already there.\n",
2513 lwpid_of (event_child),
2514 event_child->collecting_fast_tracepoint);
2515
2516 trace_event = 1;
2517
2518 event_child->collecting_fast_tracepoint
2519 = linux_fast_tracepoint_collecting (event_child, NULL);
2520
2521 if (event_child->collecting_fast_tracepoint != 1)
2522 {
2523 /* No longer need this breakpoint. */
2524 if (event_child->exit_jump_pad_bkpt != NULL)
2525 {
2526 if (debug_threads)
2527 fprintf (stderr,
2528 "No longer need exit-jump-pad bkpt; removing it; "
2529 "stopping all threads momentarily.\n");
2530
2531 /* Other running threads could hit this breakpoint.
2532 We don't handle moribund locations like GDB does;
2533 instead we always pause all threads when removing
2534 breakpoints, so that any step-over or
2535 decr_pc_after_break adjustment is always taken
2536 care of while the breakpoint is still
2537 inserted. */
2538 stop_all_lwps (1, event_child);
2539 cancel_breakpoints ();
2540
2541 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2542 event_child->exit_jump_pad_bkpt = NULL;
2543
2544 unstop_all_lwps (1, event_child);
2545
2546 gdb_assert (event_child->suspended >= 0);
2547 }
2548 }
2549
2550 if (event_child->collecting_fast_tracepoint == 0)
2551 {
2552 if (debug_threads)
2553 fprintf (stderr,
2554 "fast tracepoint finished "
2555 "collecting successfully.\n");
2556
2557 /* We may have a deferred signal to report. */
2558 if (dequeue_one_deferred_signal (event_child, &w))
2559 {
2560 if (debug_threads)
2561 fprintf (stderr, "dequeued one signal.\n");
2562 }
2563 else
2564 {
2565 if (debug_threads)
2566 fprintf (stderr, "no deferred signals.\n");
2567
2568 if (stabilizing_threads)
2569 {
2570 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2571 ourstatus->value.sig = GDB_SIGNAL_0;
2572 return ptid_of (event_child);
2573 }
2574 }
2575 }
2576 }
2577
2578 /* Check whether GDB would be interested in this event. */
2579
2580 /* If GDB is not interested in this signal, don't stop other
2581 threads, and don't report it to GDB. Just resume the inferior
2582 right away. We do this for threading-related signals as well as
2583 any that GDB specifically requested we ignore. But never ignore
2584 SIGSTOP if we sent it ourselves, and do not ignore signals when
2585 stepping - they may require special handling to skip the signal
2586 handler. */
2587 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2588 thread library? */
2589 if (WIFSTOPPED (w)
2590 && current_inferior->last_resume_kind != resume_step
2591 && (
2592 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2593 (current_process ()->private->thread_db != NULL
2594 && (WSTOPSIG (w) == __SIGRTMIN
2595 || WSTOPSIG (w) == __SIGRTMIN + 1))
2596 ||
2597 #endif
2598 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2599 && !(WSTOPSIG (w) == SIGSTOP
2600 && current_inferior->last_resume_kind == resume_stop))))
2601 {
2602 siginfo_t info, *info_p;
2603
2604 if (debug_threads)
2605 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2606 WSTOPSIG (w), lwpid_of (event_child));
2607
2608 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2609 (PTRACE_ARG3_TYPE) 0, &info) == 0)
2610 info_p = &info;
2611 else
2612 info_p = NULL;
2613 linux_resume_one_lwp (event_child, event_child->stepping,
2614 WSTOPSIG (w), info_p);
2615 goto retry;
2616 }
2617
2618 /* If GDB wanted this thread to single step, we always want to
2619 report the SIGTRAP, and let GDB handle it. Watchpoints should
2620 always be reported. So should signals we can't explain. A
2621 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2622 may not support Z0 breakpoints. If we do, we'll be able to handle
2623 GDB breakpoints on top of internal breakpoints, by handling the
2624 internal breakpoint and still reporting the event to GDB. If we
2625 don't, we're out of luck; GDB won't see the breakpoint hit. */
2626 report_to_gdb = (!maybe_internal_trap
2627 || current_inferior->last_resume_kind == resume_step
2628 || event_child->stopped_by_watchpoint
2629 || (!step_over_finished
2630 && !bp_explains_trap && !trace_event)
2631 || (gdb_breakpoint_here (event_child->stop_pc)
2632 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2633 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2634
2635 run_breakpoint_commands (event_child->stop_pc);
2636
2637 /* We found no reason GDB would want us to stop. We either hit one
2638 of our own breakpoints, or finished an internal step GDB
2639 shouldn't know about. */
2640 if (!report_to_gdb)
2641 {
2642 if (debug_threads)
2643 {
2644 if (bp_explains_trap)
2645 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2646 if (step_over_finished)
2647 fprintf (stderr, "Step-over finished.\n");
2648 if (trace_event)
2649 fprintf (stderr, "Tracepoint event.\n");
2650 }
2651
2652 /* We're not reporting this breakpoint to GDB, so apply the
2653 decr_pc_after_break adjustment to the inferior's regcache
2654 ourselves. */
2655
2656 if (the_low_target.set_pc != NULL)
2657 {
2658 struct regcache *regcache
2659 = get_thread_regcache (get_lwp_thread (event_child), 1);
2660 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2661 }
2662
2663 /* We may have finished stepping over a breakpoint. If so,
2664 we've stopped and suspended all LWPs momentarily except the
2665 stepping one. This is where we resume them all again. We're
2666 going to keep waiting, so use proceed, which handles stepping
2667 over the next breakpoint. */
2668 if (debug_threads)
2669 fprintf (stderr, "proceeding all threads.\n");
2670
2671 if (step_over_finished)
2672 unsuspend_all_lwps (event_child);
2673
2674 proceed_all_lwps ();
2675 goto retry;
2676 }
2677
2678 if (debug_threads)
2679 {
2680 if (current_inferior->last_resume_kind == resume_step)
2681 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2682 if (event_child->stopped_by_watchpoint)
2683 fprintf (stderr, "Stopped by watchpoint.\n");
2684 if (gdb_breakpoint_here (event_child->stop_pc))
2685 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2686 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2688 }
2689
2690 /* Alright, we're going to report a stop. */
2691
2692 if (!non_stop && !stabilizing_threads)
2693 {
2694 /* In all-stop, stop all threads. */
2695 stop_all_lwps (0, NULL);
2696
2697 /* If we're not waiting for a specific LWP, choose an event LWP
2698 from among those that have had events. Giving equal priority
2699 to all LWPs that have had events helps prevent
2700 starvation. */
2701 if (ptid_equal (ptid, minus_one_ptid))
2702 {
2703 event_child->status_pending_p = 1;
2704 event_child->status_pending = w;
2705
2706 select_event_lwp (&event_child);
2707
2708 event_child->status_pending_p = 0;
2709 w = event_child->status_pending;
2710 }
2711
2712 /* Now that we've selected our final event LWP, cancel any
2713 breakpoints in other LWPs that have hit a GDB breakpoint.
2714 See the comment in cancel_breakpoints_callback to find out
2715 why. */
2716 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2717
2718 /* If we were doing a step-over, all other threads but the stepping one
2719 had been paused in start_step_over, with their suspend counts
2720 incremented. We don't want to do a full unstop/unpause, because we're
2721 in all-stop mode (so we want threads stopped), but we still need to
2722 unsuspend the other threads, to decrement their `suspended' count
2723 back. */
2724 if (step_over_finished)
2725 unsuspend_all_lwps (event_child);
2726
2727 /* Stabilize threads (move out of jump pads). */
2728 stabilize_threads ();
2729 }
2730 else
2731 {
2732 /* If we just finished a step-over, then all threads had been
2733 momentarily paused. In all-stop, that's fine, we want
2734 threads stopped by now anyway. In non-stop, we need to
2735 re-resume threads that GDB wanted to be running. */
2736 if (step_over_finished)
2737 unstop_all_lwps (1, event_child);
2738 }
2739
2740 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2741
2742 if (current_inferior->last_resume_kind == resume_stop
2743 && WSTOPSIG (w) == SIGSTOP)
2744 {
2745 /* A thread that GDB requested to stop with vCont;t stopped
2746 cleanly, so report it as SIG0. The use of
2747 SIGSTOP is an implementation detail. */
2748 ourstatus->value.sig = GDB_SIGNAL_0;
2749 }
2750 else if (current_inferior->last_resume_kind == resume_stop
2751 && WSTOPSIG (w) != SIGSTOP)
2752 {
2753 /* A thread that GDB requested to stop with vCont;t, but which
2754 stopped for some other reason. */
2755 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2756 }
2757 else
2758 {
2759 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2760 }
2761
2762 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2763
2764 if (debug_threads)
2765 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2766 target_pid_to_str (ptid_of (event_child)),
2767 ourstatus->kind,
2768 ourstatus->value.sig);
2769
2770 return ptid_of (event_child);
2771 }
2772
2773 /* Get rid of any pending event in the pipe. */
2774 static void
2775 async_file_flush (void)
2776 {
2777 int ret;
2778 char buf;
2779
2780 do
2781 ret = read (linux_event_pipe[0], &buf, 1);
2782 while (ret >= 0 || (ret == -1 && errno == EINTR));
2783 }
2784
2785 /* Put something in the pipe, so the event loop wakes up. */
2786 static void
2787 async_file_mark (void)
2788 {
2789 int ret;
2790
2791 async_file_flush ();
2792
2793 do
2794 ret = write (linux_event_pipe[1], "+", 1);
2795 while (ret == 0 || (ret == -1 && errno == EINTR));
2796
2797 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2798 be awakened anyway. */
2799 }
2800
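/* Illustrative sketch, an editor's addition and not compiled: the
   flush and mark loops above assume both ends of linux_event_pipe
   are non-blocking, so read returns -1 with EAGAIN once the pipe is
   drained instead of blocking.  A typical setup (hypothetical helper
   name):  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
example_make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) == -1
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) == -1)
    {
      close (fds[0]);
      close (fds[1]);
      return -1;
    }
  return 0;
}
#endif
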
2801 static ptid_t
2802 linux_wait (ptid_t ptid,
2803 struct target_waitstatus *ourstatus, int target_options)
2804 {
2805 ptid_t event_ptid;
2806
2807 if (debug_threads)
2808 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2809
2810 /* Flush the async file first. */
2811 if (target_is_async_p ())
2812 async_file_flush ();
2813
2814 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2815
2816 /* If at least one stop was reported, there may be more. A single
2817 SIGCHLD can signal more than one child stop. */
2818 if (target_is_async_p ()
2819 && (target_options & TARGET_WNOHANG) != 0
2820 && !ptid_equal (event_ptid, null_ptid))
2821 async_file_mark ();
2822
2823 return event_ptid;
2824 }
2825
2826 /* Send a signal to an LWP. */
2827
2828 static int
2829 kill_lwp (unsigned long lwpid, int signo)
2830 {
2831 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2832 fails, then we are not using nptl threads and we should be using kill. */
2833
2834 #ifdef __NR_tkill
2835 {
2836 static int tkill_failed;
2837
2838 if (!tkill_failed)
2839 {
2840 int ret;
2841
2842 errno = 0;
2843 ret = syscall (__NR_tkill, lwpid, signo);
2844 if (errno != ENOSYS)
2845 return ret;
2846 tkill_failed = 1;
2847 }
2848 }
2849 #endif
2850
2851 return kill (lwpid, signo);
2852 }
2853
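/* Illustrative note, an editor's addition with a hypothetical
   sketch: under NPTL all threads of a process share one process ID,
   so kill () aimed at an LWP ID only reliably reaches the
   thread-group leader, while tkill targets one specific kernel task.
   Later kernels added tgkill, which also guards against the thread
   ID having been recycled by another process:  */
#if 0
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
example_tgkill (pid_t tgid, pid_t tid, int signo)
{
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif
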
2854 void
2855 linux_stop_lwp (struct lwp_info *lwp)
2856 {
2857 send_sigstop (lwp);
2858 }
2859
2860 static void
2861 send_sigstop (struct lwp_info *lwp)
2862 {
2863 int pid;
2864
2865 pid = lwpid_of (lwp);
2866
2867 /* If we already have a pending stop signal for this process, don't
2868 send another. */
2869 if (lwp->stop_expected)
2870 {
2871 if (debug_threads)
2872 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2873
2874 return;
2875 }
2876
2877 if (debug_threads)
2878 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2879
2880 lwp->stop_expected = 1;
2881 kill_lwp (pid, SIGSTOP);
2882 }
2883
2884 static int
2885 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2886 {
2887 struct lwp_info *lwp = (struct lwp_info *) entry;
2888
2889 /* Ignore EXCEPT. */
2890 if (lwp == except)
2891 return 0;
2892
2893 if (lwp->stopped)
2894 return 0;
2895
2896 send_sigstop (lwp);
2897 return 0;
2898 }
2899
2900 /* Increment the suspend count of an LWP, and stop it, if not stopped
2901 yet. */
2902 static int
2903 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2904 void *except)
2905 {
2906 struct lwp_info *lwp = (struct lwp_info *) entry;
2907
2908 /* Ignore EXCEPT. */
2909 if (lwp == except)
2910 return 0;
2911
2912 lwp->suspended++;
2913
2914 return send_sigstop_callback (entry, except);
2915 }
2916
2917 static void
2918 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2919 {
2920 /* It's dead, really. */
2921 lwp->dead = 1;
2922
2923 /* Store the exit status for later. */
2924 lwp->status_pending_p = 1;
2925 lwp->status_pending = wstat;
2926
2927 /* Prevent trying to stop it. */
2928 lwp->stopped = 1;
2929
2930 /* No further stops are expected from a dead lwp. */
2931 lwp->stop_expected = 0;
2932 }
2933
2934 static void
2935 wait_for_sigstop (struct inferior_list_entry *entry)
2936 {
2937 struct lwp_info *lwp = (struct lwp_info *) entry;
2938 struct thread_info *saved_inferior;
2939 int wstat;
2940 ptid_t saved_tid;
2941 ptid_t ptid;
2942 int pid;
2943
2944 if (lwp->stopped)
2945 {
2946 if (debug_threads)
2947 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2948 lwpid_of (lwp));
2949 return;
2950 }
2951
2952 saved_inferior = current_inferior;
2953 if (saved_inferior != NULL)
2954 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2955 else
2956 saved_tid = null_ptid; /* avoid bogus unused warning */
2957
2958 ptid = lwp->head.id;
2959
2960 if (debug_threads)
2961 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2962
2963 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2964
2965 /* If we stopped with a non-SIGSTOP signal, save it for later
2966 and record the pending SIGSTOP. If the process exited, just
2967 return. */
2968 if (WIFSTOPPED (wstat))
2969 {
2970 if (debug_threads)
2971 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2972 lwpid_of (lwp), WSTOPSIG (wstat));
2973
2974 if (WSTOPSIG (wstat) != SIGSTOP)
2975 {
2976 if (debug_threads)
2977 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2978 lwpid_of (lwp), wstat);
2979
2980 lwp->status_pending_p = 1;
2981 lwp->status_pending = wstat;
2982 }
2983 }
2984 else
2985 {
2986 if (debug_threads)
2987 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2988
2989 lwp = find_lwp_pid (pid_to_ptid (pid));
2990 if (lwp)
2991 {
2992 /* Leave this status pending for the next time we're able to
2993 report it. In the meantime, we'll report this lwp as
2994 dead to GDB, so GDB doesn't try to read registers and
2995 memory from it. This can only happen if this was the
2996 last thread of the process; otherwise, PID is removed
2997 from the thread tables before linux_wait_for_event
2998 returns. */
2999 mark_lwp_dead (lwp, wstat);
3000 }
3001 }
3002
3003 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3004 current_inferior = saved_inferior;
3005 else
3006 {
3007 if (debug_threads)
3008 fprintf (stderr, "Previously current thread died.\n");
3009
3010 if (non_stop)
3011 {
3012 /* We can't change the current inferior behind GDB's back,
3013 otherwise, a subsequent command may apply to the wrong
3014 process. */
3015 current_inferior = NULL;
3016 }
3017 else
3018 {
3019 /* Set a valid thread as current. */
3020 set_desired_inferior (0);
3021 }
3022 }
3023 }
3024
3025 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3026 move it out, because we need to report the stop event to GDB. For
3027 example, if the user puts a breakpoint in the jump pad, it's
3028 because she wants to debug it. */
3029
3030 static int
3031 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3032 {
3033 struct lwp_info *lwp = (struct lwp_info *) entry;
3034 struct thread_info *thread = get_lwp_thread (lwp);
3035
3036 gdb_assert (lwp->suspended == 0);
3037 gdb_assert (lwp->stopped);
3038
3039 /* Allow debugging the jump pad, gdb_collect, etc. */
3040 return (supports_fast_tracepoints ()
3041 && agent_loaded_p ()
3042 && (gdb_breakpoint_here (lwp->stop_pc)
3043 || lwp->stopped_by_watchpoint
3044 || thread->last_resume_kind == resume_step)
3045 && linux_fast_tracepoint_collecting (lwp, NULL));
3046 }
3047
3048 static void
3049 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3050 {
3051 struct lwp_info *lwp = (struct lwp_info *) entry;
3052 struct thread_info *thread = get_lwp_thread (lwp);
3053 int *wstat;
3054
3055 gdb_assert (lwp->suspended == 0);
3056 gdb_assert (lwp->stopped);
3057
3058 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3059
3060 /* Allow debugging the jump pad, gdb_collect, etc. */
3061 if (!gdb_breakpoint_here (lwp->stop_pc)
3062 && !lwp->stopped_by_watchpoint
3063 && thread->last_resume_kind != resume_step
3064 && maybe_move_out_of_jump_pad (lwp, wstat))
3065 {
3066 if (debug_threads)
3067 fprintf (stderr,
3068 "LWP %ld needs stabilizing (in jump pad)\n",
3069 lwpid_of (lwp));
3070
3071 if (wstat)
3072 {
3073 lwp->status_pending_p = 0;
3074 enqueue_one_deferred_signal (lwp, wstat);
3075
3076 if (debug_threads)
3077 fprintf (stderr,
3078 "Signal %d for LWP %ld deferred "
3079 "(in jump pad)\n",
3080 WSTOPSIG (*wstat), lwpid_of (lwp));
3081 }
3082
3083 linux_resume_one_lwp (lwp, 0, 0, NULL);
3084 }
3085 else
3086 lwp->suspended++;
3087 }
3088
3089 static int
3090 lwp_running (struct inferior_list_entry *entry, void *data)
3091 {
3092 struct lwp_info *lwp = (struct lwp_info *) entry;
3093
3094 if (lwp->dead)
3095 return 0;
3096 if (lwp->stopped)
3097 return 0;
3098 return 1;
3099 }
3100
3101 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3102 If SUSPEND, then also increase the suspend count of every LWP,
3103 except EXCEPT. */
3104
3105 static void
3106 stop_all_lwps (int suspend, struct lwp_info *except)
3107 {
3108 /* Should not be called recursively. */
3109 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3110
3111 stopping_threads = (suspend
3112 ? STOPPING_AND_SUSPENDING_THREADS
3113 : STOPPING_THREADS);
3114
3115 if (suspend)
3116 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3117 else
3118 find_inferior (&all_lwps, send_sigstop_callback, except);
3119 for_each_inferior (&all_lwps, wait_for_sigstop);
3120 stopping_threads = NOT_STOPPING_THREADS;
3121 }
3122
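/* Illustrative sketch, an editor's addition and not compiled: the
   suspending flavor of stop_all_lwps pairs with unstop_all_lwps, as
   in the exit-jump-pad breakpoint removal earlier in this file.
   Nothing runs in between, so breakpoints can be mutated safely.  */
#if 0
static void
example_pause_around_breakpoint_removal (struct lwp_info *event_child)
{
  /* Freeze every LWP except EVENT_CHILD and bump its suspend
     count.  */
  stop_all_lwps (1, event_child);

  /* ... safely remove or rewrite breakpoints here ...  */

  /* Drop the suspend counts and re-resume whoever was running.  */
  unstop_all_lwps (1, event_child);
}
#endif
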
3123 /* Resume execution of the inferior process.
3124 If STEP is nonzero, single-step it.
3125 If SIGNAL is nonzero, give it that signal. */
3126
3127 static void
3128 linux_resume_one_lwp (struct lwp_info *lwp,
3129 int step, int signal, siginfo_t *info)
3130 {
3131 struct thread_info *saved_inferior;
3132 int fast_tp_collecting;
3133
3134 if (lwp->stopped == 0)
3135 return;
3136
3137 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3138
3139 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3140
3141 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3142 user used the "jump" command, or "set $pc = foo"). */
3143 if (lwp->stop_pc != get_pc (lwp))
3144 {
3145 /* Collecting 'while-stepping' actions doesn't make sense
3146 anymore. */
3147 release_while_stepping_state_list (get_lwp_thread (lwp));
3148 }
3149
3150 /* If we have pending signals or status, and a new signal, enqueue the
3151 signal. Also enqueue the signal if we are waiting to reinsert a
3152 breakpoint; it will be picked up again below. */
3153 if (signal != 0
3154 && (lwp->status_pending_p
3155 || lwp->pending_signals != NULL
3156 || lwp->bp_reinsert != 0
3157 || fast_tp_collecting))
3158 {
3159 struct pending_signals *p_sig;
3160 p_sig = xmalloc (sizeof (*p_sig));
3161 p_sig->prev = lwp->pending_signals;
3162 p_sig->signal = signal;
3163 if (info == NULL)
3164 memset (&p_sig->info, 0, sizeof (siginfo_t));
3165 else
3166 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3167 lwp->pending_signals = p_sig;
3168 }
3169
3170 if (lwp->status_pending_p)
3171 {
3172 if (debug_threads)
3173 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3174 " has pending status\n",
3175 lwpid_of (lwp), step ? "step" : "continue", signal,
3176 lwp->stop_expected ? "expected" : "not expected");
3177 return;
3178 }
3179
3180 saved_inferior = current_inferior;
3181 current_inferior = get_lwp_thread (lwp);
3182
3183 if (debug_threads)
3184 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3185 lwpid_of (lwp), step ? "step" : "continue", signal,
3186 lwp->stop_expected ? "expected" : "not expected");
3187
3188 /* This bit needs some thinking about. If we get a signal that
3189 we must report while a single-step reinsert is still pending,
3190 we often end up resuming the thread. It might be better to
3191 (ew) allow a stack of pending events; then we could be sure that
3192 the reinsert happened right away and not lose any signals.
3193
3194 Making this stack would also shrink the window in which breakpoints are
3195 uninserted (see comment in linux_wait_for_lwp) but not enough for
3196 complete correctness, so it won't solve that problem. It may be
3197 worthwhile just to solve this one, however. */
3198 if (lwp->bp_reinsert != 0)
3199 {
3200 if (debug_threads)
3201 fprintf (stderr, " pending reinsert at 0x%s\n",
3202 paddress (lwp->bp_reinsert));
3203
3204 if (can_hardware_single_step ())
3205 {
3206 if (fast_tp_collecting == 0)
3207 {
3208 if (step == 0)
3209 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3210 if (lwp->suspended)
3211 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3212 lwp->suspended);
3213 }
3214
3215 step = 1;
3216 }
3217
3218 /* Postpone any pending signal. It was enqueued above. */
3219 signal = 0;
3220 }
3221
3222 if (fast_tp_collecting == 1)
3223 {
3224 if (debug_threads)
3225 fprintf (stderr, "\
3226 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3227 lwpid_of (lwp));
3228
3229 /* Postpone any pending signal. It was enqueued above. */
3230 signal = 0;
3231 }
3232 else if (fast_tp_collecting == 2)
3233 {
3234 if (debug_threads)
3235 fprintf (stderr, "\
3236 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3237 lwpid_of (lwp));
3238
3239 if (can_hardware_single_step ())
3240 step = 1;
3241 else
3242 fatal ("moving out of jump pad single-stepping"
3243 " not implemented on this target");
3244
3245 /* Postpone any pending signal. It was enqueued above. */
3246 signal = 0;
3247 }
3248
3249 /* If we have while-stepping actions in this thread, set it stepping.
3250 If we have a signal to deliver, it may or may not be set to
3251 SIG_IGN, we don't know. Assume so, and allow collecting
3252 while-stepping into a signal handler. A possible smart thing to
3253 do would be to set an internal breakpoint at the signal return
3254 address, continue, and carry on catching this while-stepping
3255 action only when that breakpoint is hit. A future
3256 enhancement. */
3257 if (get_lwp_thread (lwp)->while_stepping != NULL
3258 && can_hardware_single_step ())
3259 {
3260 if (debug_threads)
3261 fprintf (stderr,
3262 "lwp %ld has a while-stepping action -> forcing step.\n",
3263 lwpid_of (lwp));
3264 step = 1;
3265 }
3266
3267 if (debug_threads && the_low_target.get_pc != NULL)
3268 {
3269 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3270 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3271 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3272 }
3273
3274 /* If we have pending signals, consume one unless we are trying to
3275 reinsert a breakpoint or we're trying to finish a fast tracepoint
3276 collect. */
3277 if (lwp->pending_signals != NULL
3278 && lwp->bp_reinsert == 0
3279 && fast_tp_collecting == 0)
3280 {
3281 struct pending_signals **p_sig;
3282
3283 p_sig = &lwp->pending_signals;
3284 while ((*p_sig)->prev != NULL)
3285 p_sig = &(*p_sig)->prev;
3286
3287 signal = (*p_sig)->signal;
3288 if ((*p_sig)->info.si_signo != 0)
3289 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3290 &(*p_sig)->info);
3291
3292 free (*p_sig);
3293 *p_sig = NULL;
3294 }
3295
3296 if (the_low_target.prepare_to_resume != NULL)
3297 the_low_target.prepare_to_resume (lwp);
3298
3299 regcache_invalidate_one ((struct inferior_list_entry *)
3300 get_lwp_thread (lwp));
3301 errno = 0;
3302 lwp->stopped = 0;
3303 lwp->stopped_by_watchpoint = 0;
3304 lwp->stepping = step;
3305 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3306 (PTRACE_ARG3_TYPE) 0,
3307 /* Coerce to a uintptr_t first to avoid potential gcc warning
3308 of coercing an 8 byte integer to a 4 byte pointer. */
3309 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3310
3311 current_inferior = saved_inferior;
3312 if (errno)
3313 {
3314 /* ESRCH from ptrace either means that the thread was already
3315 running (an error) or that it is gone (a race condition). If
3316 it's gone, we will get a notification the next time we wait,
3317 so we can ignore the error. We could differentiate these
3318 two, but it's tricky without waiting; the thread still exists
3319 as a zombie, so sending it signal 0 would succeed. So just
3320 ignore ESRCH. */
3321 if (errno == ESRCH)
3322 return;
3323
3324 perror_with_name ("ptrace");
3325 }
3326 }
3327
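/* Illustrative sketch, an editor's addition and not compiled: the
   pending-signal list used above is pushed at the head (newest
   first, linked through PREV) and consumed from the tail, so signals
   are delivered in FIFO order.  The pointer-to-pointer walk unlinks
   the last node without a special case for a one-element list.  */
#if 0
#include <stdlib.h>

struct example_sig
{
  struct example_sig *prev;
  int signal;
};

/* Pop the oldest entry; return its signal, or -1 if the list is
   empty.  */
static int
example_pop_oldest (struct example_sig **head)
{
  struct example_sig **p = head;
  int sig;

  if (*p == NULL)
    return -1;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif
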
3328 struct thread_resume_array
3329 {
3330 struct thread_resume *resume;
3331 size_t n;
3332 };
3333
3334 /* This function is called once per thread. We look up the thread
3335 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3336 resume request.
3337
3338 This algorithm is O(threads * resume elements), but resume elements
3339 is small (and will remain small at least until GDB supports thread
3340 suspension). */
3341 static int
3342 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3343 {
3344 struct lwp_info *lwp;
3345 struct thread_info *thread;
3346 int ndx;
3347 struct thread_resume_array *r;
3348
3349 thread = (struct thread_info *) entry;
3350 lwp = get_thread_lwp (thread);
3351 r = arg;
3352
3353 for (ndx = 0; ndx < r->n; ndx++)
3354 {
3355 ptid_t ptid = r->resume[ndx].thread;
3356 if (ptid_equal (ptid, minus_one_ptid)
3357 || ptid_equal (ptid, entry->id)
3358 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3359 of PID'. */
3360 || (ptid_get_pid (ptid) == pid_of (lwp)
3361 && (ptid_is_pid (ptid)
3362 || ptid_get_lwp (ptid) == -1)))
3363 {
3364 if (r->resume[ndx].kind == resume_stop
3365 && thread->last_resume_kind == resume_stop)
3366 {
3367 if (debug_threads)
3368 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3369 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3370 ? "stopped"
3371 : "stopping",
3372 lwpid_of (lwp));
3373
3374 continue;
3375 }
3376
3377 lwp->resume = &r->resume[ndx];
3378 thread->last_resume_kind = lwp->resume->kind;
3379
3380 /* If we had a deferred signal to report, dequeue one now.
3381 This can happen if LWP gets more than one signal while
3382 trying to get out of a jump pad. */
3383 if (lwp->stopped
3384 && !lwp->status_pending_p
3385 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3386 {
3387 lwp->status_pending_p = 1;
3388
3389 if (debug_threads)
3390 fprintf (stderr,
3391 "Dequeueing deferred signal %d for LWP %ld, "
3392 "leaving status pending.\n",
3393 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3394 }
3395
3396 return 0;
3397 }
3398 }
3399
3400 /* No resume action for this thread. */
3401 lwp->resume = NULL;
3402
3403 return 0;
3404 }
3405
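/* Illustrative sketch, an editor's addition and not compiled: the
   thread-matching test above, pulled out as a predicate.  A request
   for minus_one_ptid matches every thread; 'pPID' and 'pPID.-1'
   match all threads of PID; anything else must match exactly.  */
#if 0
static int
example_request_matches (ptid_t request, ptid_t thread_id)
{
  return (ptid_equal (request, minus_one_ptid)
	  || ptid_equal (request, thread_id)
	  || (ptid_get_pid (request) == ptid_get_pid (thread_id)
	      && (ptid_is_pid (request)
		  || ptid_get_lwp (request) == -1)));
}
#endif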
3406
3407 /* Set *FLAG_P if this lwp has an interesting status pending. */
3408 static int
3409 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3410 {
3411 struct lwp_info *lwp = (struct lwp_info *) entry;
3412
3413 /* LWPs which will not be resumed are not interesting, because
3414 we might not wait for them next time through linux_wait. */
3415 if (lwp->resume == NULL)
3416 return 0;
3417
3418 if (lwp->status_pending_p)
3419 * (int *) flag_p = 1;
3420
3421 return 0;
3422 }
3423
3424 /* Return 1 if this lwp that GDB wants running is stopped at an
3425 internal breakpoint that we need to step over. It assumes that any
3426 required STOP_PC adjustment has already been propagated to the
3427 inferior's regcache. */
3428
3429 static int
3430 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3431 {
3432 struct lwp_info *lwp = (struct lwp_info *) entry;
3433 struct thread_info *thread;
3434 struct thread_info *saved_inferior;
3435 CORE_ADDR pc;
3436
3437 /* LWPs which will not be resumed are not interesting, because we
3438 might not wait for them next time through linux_wait. */
3439
3440 if (!lwp->stopped)
3441 {
3442 if (debug_threads)
3443 fprintf (stderr,
3444 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3445 lwpid_of (lwp));
3446 return 0;
3447 }
3448
3449 thread = get_lwp_thread (lwp);
3450
3451 if (thread->last_resume_kind == resume_stop)
3452 {
3453 if (debug_threads)
3454 fprintf (stderr,
3455 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3456 lwpid_of (lwp));
3457 return 0;
3458 }
3459
3460 gdb_assert (lwp->suspended >= 0);
3461
3462 if (lwp->suspended)
3463 {
3464 if (debug_threads)
3465 fprintf (stderr,
3466 "Need step over [LWP %ld]? Ignoring, suspended\n",
3467 lwpid_of (lwp));
3468 return 0;
3469 }
3470
3471 if (!lwp->need_step_over)
3472 {
3473 if (debug_threads)
3474 fprintf (stderr,
3475 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3476 }
3477
3478 if (lwp->status_pending_p)
3479 {
3480 if (debug_threads)
3481 fprintf (stderr,
3482 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3483 lwpid_of (lwp));
3484 return 0;
3485 }
3486
3487 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3488 or we have. */
3489 pc = get_pc (lwp);
3490
3491 /* If the PC has changed since we stopped, then don't do anything,
3492 and let the breakpoint/tracepoint be hit. This happens if, for
3493 instance, GDB handled the decr_pc_after_break subtraction itself,
3494 GDB is OOL stepping this thread, the user has issued a "jump"
3495 command, or has poked the thread's registers herself. */
3496 if (pc != lwp->stop_pc)
3497 {
3498 if (debug_threads)
3499 fprintf (stderr,
3500 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3501 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3502 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3503
3504 lwp->need_step_over = 0;
3505 return 0;
3506 }
3507
3508 saved_inferior = current_inferior;
3509 current_inferior = thread;
3510
3511 /* We can only step over breakpoints we know about. */
3512 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3513 {
3514 /* Don't step over a breakpoint that GDB expects to hit
3515 though. If the condition is being evaluated on the target's side
3516 and it evaluates to false, step over this breakpoint as well. */
3517 if (gdb_breakpoint_here (pc)
3518 && gdb_condition_true_at_breakpoint (pc)
3519 && gdb_no_commands_at_breakpoint (pc))
3520 {
3521 if (debug_threads)
3522 fprintf (stderr,
3523 "Need step over [LWP %ld]? yes, but found"
3524 " GDB breakpoint at 0x%s; skipping step over\n",
3525 lwpid_of (lwp), paddress (pc));
3526
3527 current_inferior = saved_inferior;
3528 return 0;
3529 }
3530 else
3531 {
3532 if (debug_threads)
3533 fprintf (stderr,
3534 "Need step over [LWP %ld]? yes, "
3535 "found breakpoint at 0x%s\n",
3536 lwpid_of (lwp), paddress (pc));
3537
3538 /* We've found an lwp that needs stepping over --- return 1 so
3539 that find_inferior stops looking. */
3540 current_inferior = saved_inferior;
3541
3542 /* If the step over is cancelled, this is set again. */
3543 lwp->need_step_over = 0;
3544 return 1;
3545 }
3546 }
3547
3548 current_inferior = saved_inferior;
3549
3550 if (debug_threads)
3551 fprintf (stderr,
3552 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3553 lwpid_of (lwp), paddress (pc));
3554
3555 return 0;
3556 }
3557
3558 /* Start a step-over operation on LWP. When LWP stops at a
3559 breakpoint, to make progress we need to move the breakpoint out
3560 of the way. If we let other threads run while we do that, they may
3561 pass by the breakpoint location and miss hitting it. To avoid
3562 that, a step-over momentarily stops all threads while LWP is
3563 single-stepped with the breakpoint temporarily uninserted from
3564 the inferior. When the single-step finishes, we reinsert the
3565 breakpoint, and let all threads that are supposed to be running
3566 run again.
3567
3568 On targets that don't support hardware single-step, we don't
3569 currently support full software single-stepping. Instead, we only
3570 support stepping over the thread event breakpoint, by asking the
3571 low target where to place a reinsert breakpoint. Since this
3572 routine assumes the breakpoint being stepped over is a thread event
3573 breakpoint, it usually assumes the return address of the current
3574 function is a good enough place to set the reinsert breakpoint. */
3575
3576 static int
3577 start_step_over (struct lwp_info *lwp)
3578 {
3579 struct thread_info *saved_inferior;
3580 CORE_ADDR pc;
3581 int step;
3582
3583 if (debug_threads)
3584 fprintf (stderr,
3585 "Starting step-over on LWP %ld. Stopping all threads\n",
3586 lwpid_of (lwp));
3587
3588 stop_all_lwps (1, lwp);
3589 gdb_assert (lwp->suspended == 0);
3590
3591 if (debug_threads)
3592 fprintf (stderr, "Done stopping all threads for step-over.\n");
3593
3594 /* Note, we should always reach here with an already adjusted PC,
3595 either by GDB (if we're resuming due to GDB's request), or by our
3596 caller, if we just finished handling an internal breakpoint GDB
3597 shouldn't care about. */
3598 pc = get_pc (lwp);
3599
3600 saved_inferior = current_inferior;
3601 current_inferior = get_lwp_thread (lwp);
3602
3603 lwp->bp_reinsert = pc;
3604 uninsert_breakpoints_at (pc);
3605 uninsert_fast_tracepoint_jumps_at (pc);
3606
3607 if (can_hardware_single_step ())
3608 {
3609 step = 1;
3610 }
3611 else
3612 {
3613 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3614 set_reinsert_breakpoint (raddr);
3615 step = 0;
3616 }
3617
3618 current_inferior = saved_inferior;
3619
3620 linux_resume_one_lwp (lwp, step, 0, NULL);
3621
3622 /* Require next event from this LWP. */
3623 step_over_bkpt = lwp->head.id;
3624 return 1;
3625 }
3626
3627 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3628 start_step_over, if still there, and delete any reinsert
3629 breakpoints we've set, on targets without hardware single-step. */
3630
3631 static int
3632 finish_step_over (struct lwp_info *lwp)
3633 {
3634 if (lwp->bp_reinsert != 0)
3635 {
3636 if (debug_threads)
3637 fprintf (stderr, "Finished step over.\n");
3638
3639 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3640 may be no breakpoint to reinsert there by now. */
3641 reinsert_breakpoints_at (lwp->bp_reinsert);
3642 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3643
3644 lwp->bp_reinsert = 0;
3645
3646 /* Delete any software-single-step reinsert breakpoints. No
3647 longer needed. We don't have to worry about other threads
3648 hitting this trap, and later not being able to explain it,
3649 because we were stepping over a breakpoint, and we hold all
3650 threads but LWP stopped while doing that. */
3651 if (!can_hardware_single_step ())
3652 delete_reinsert_breakpoints ();
3653
3654 step_over_bkpt = null_ptid;
3655 return 1;
3656 }
3657 else
3658 return 0;
3659 }
3660
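/* Illustrative sketch, an editor's addition and not compiled: the
   step-over life cycle that start_step_over and finish_step_over
   implement, as linux_wait_1 drives it.  */
#if 0
static void
example_step_over_cycle (struct lwp_info *lwp)
{
  /* 1. Stop everything, pull the breakpoint out from under LWP, and
     single-step it; step_over_bkpt records which LWP we now owe a
     blocking wait to.  */
  start_step_over (lwp);

  /* 2. linux_wait_1 then waits (without WNOHANG) for an event from
     step_over_bkpt only.  */

  /* 3. On the resulting stop, reinsert the breakpoint, clear
     step_over_bkpt, and let the momentarily suspended threads run
     again.  */
  if (finish_step_over (lwp))
    unsuspend_all_lwps (lwp);
}
#endif
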
3661 /* This function is called once per thread. We check the thread's resume
3662 request, which will tell us whether to resume, step, or leave the thread
3663 stopped; and what signal, if any, it should be sent.
3664
3665 For threads which we aren't explicitly told otherwise, we preserve
3666 the stepping flag; this is used for stepping over gdbserver-placed
3667 breakpoints.
3668
3669 If pending_flags was set in any thread, we queue any needed
3670 signals, since we won't actually resume. We already have a pending
3671 event to report, so we don't need to preserve any step requests;
3672 they should be re-issued if necessary. */
3673
3674 static int
3675 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3676 {
3677 struct lwp_info *lwp;
3678 struct thread_info *thread;
3679 int step;
3680 int leave_all_stopped = * (int *) arg;
3681 int leave_pending;
3682
3683 thread = (struct thread_info *) entry;
3684 lwp = get_thread_lwp (thread);
3685
3686 if (lwp->resume == NULL)
3687 return 0;
3688
3689 if (lwp->resume->kind == resume_stop)
3690 {
3691 if (debug_threads)
3692 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3693
3694 if (!lwp->stopped)
3695 {
3696 if (debug_threads)
3697 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3698
3699 /* Stop the thread, and wait for the event asynchronously,
3700 through the event loop. */
3701 send_sigstop (lwp);
3702 }
3703 else
3704 {
3705 if (debug_threads)
3706 fprintf (stderr, "already stopped LWP %ld\n",
3707 lwpid_of (lwp));
3708
3709 /* The LWP may have been stopped in an internal event that
3710 was not meant to be notified back to GDB (e.g., gdbserver
3711 breakpoint), so we should be reporting a stop event in
3712 this case too. */
3713
3714 /* If the thread already has a pending SIGSTOP, this is a
3715 no-op. Otherwise, something later will presumably resume
3716 the thread and this will cause it to cancel any pending
3717 operation, due to last_resume_kind == resume_stop. If
3718 the thread already has a pending status to report, we
3719 will still report it the next time we wait - see
3720 status_pending_p_callback. */
3721
3722 /* If we already have a pending signal to report, then
3723 there's no need to queue a SIGSTOP, as this means we're
3724 midway through moving the LWP out of the jump pad, and we
3725 will report the pending signal as soon as that is
3726 finished. */
3727 if (lwp->pending_signals_to_report == NULL)
3728 send_sigstop (lwp);
3729 }
3730
3731 /* For stop requests, we're done. */
3732 lwp->resume = NULL;
3733 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3734 return 0;
3735 }
3736
3737 /* If this thread which is about to be resumed has a pending status,
3738 then don't resume any threads - we can just report the pending
3739 status. Make sure to queue any signals that would otherwise be
3740 sent. In all-stop mode, we base this decision on whether *any*
3741 thread has a pending status. If there's a thread that needs the
3742 step-over-breakpoint dance, then don't resume any other thread
3743 but that particular one. */
3744 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3745
3746 if (!leave_pending)
3747 {
3748 if (debug_threads)
3749 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3750
3751 step = (lwp->resume->kind == resume_step);
3752 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3753 }
3754 else
3755 {
3756 if (debug_threads)
3757 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3758
3759 /* If we have a new signal, enqueue the signal. */
3760 if (lwp->resume->sig != 0)
3761 {
3762 struct pending_signals *p_sig;
3763 p_sig = xmalloc (sizeof (*p_sig));
3764 p_sig->prev = lwp->pending_signals;
3765 p_sig->signal = lwp->resume->sig;
3766 memset (&p_sig->info, 0, sizeof (siginfo_t));
3767
3768 /* If this is the same signal we were previously stopped by,
3769 make sure to queue its siginfo. We can ignore the return
3770 value of ptrace; if it fails, we'll skip
3771 PTRACE_SETSIGINFO. */
3772 if (WIFSTOPPED (lwp->last_status)
3773 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3774 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3775 &p_sig->info);
3776
3777 lwp->pending_signals = p_sig;
3778 }
3779 }
3780
3781 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3782 lwp->resume = NULL;
3783 return 0;
3784 }
3785
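/* A minimal sketch of the pending-signal push performed above:
   signals that cannot be delivered yet are kept on a singly linked
   stack, newest first, with room for the siginfo captured from the
   last stop.  Illustrative only; this helper is hypothetical and is
   not referenced by gdbserver.  */

static void
enqueue_pending_signal_sketch (struct lwp_info *lwp, int signal)
{
  struct pending_signals *p_sig = xmalloc (sizeof (*p_sig));

  p_sig->prev = lwp->pending_signals;
  p_sig->signal = signal;
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  lwp->pending_signals = p_sig;
}
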
3786 static void
3787 linux_resume (struct thread_resume *resume_info, size_t n)
3788 {
3789 struct thread_resume_array array = { resume_info, n };
3790 struct lwp_info *need_step_over = NULL;
3791 int any_pending;
3792 int leave_all_stopped;
3793
3794 find_inferior (&all_threads, linux_set_resume_request, &array);
3795
3796 /* If there is a thread which would otherwise be resumed, which has
3797 a pending status, then don't resume any threads - we can just
3798 report the pending status. Make sure to queue any signals that
3799 would otherwise be sent. In non-stop mode, we'll apply this
3800 logic to each thread individually. We consume all pending events
3801 before considering starting a step-over (in all-stop). */
3802 any_pending = 0;
3803 if (!non_stop)
3804 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3805
3806 /* If there is a thread which would otherwise be resumed, which is
3807 stopped at a breakpoint that needs stepping over, then don't
3808 resume any threads - have it step over the breakpoint with all
3809 other threads stopped, then resume all threads again. Make sure
3810 to queue any signals that would otherwise be delivered or
3811 queued. */
3812 if (!any_pending && supports_breakpoints ())
3813 need_step_over
3814 = (struct lwp_info *) find_inferior (&all_lwps,
3815 need_step_over_p, NULL);
3816
3817 leave_all_stopped = (need_step_over != NULL || any_pending);
3818
3819 if (debug_threads)
3820 {
3821 if (need_step_over != NULL)
3822 fprintf (stderr, "Not resuming all, need step over\n");
3823 else if (any_pending)
3824 fprintf (stderr,
3825 "Not resuming, all-stop and found "
3826 "an LWP with pending status\n");
3827 else
3828 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3829 }
3830
3831 /* Even if we're leaving threads stopped, queue all signals we'd
3832 otherwise deliver. */
3833 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3834
3835 if (need_step_over)
3836 start_step_over (need_step_over);
3837 }
3838
3839 /* This function is called once per thread. We check the thread's
3840 last resume request, which will tell us whether to resume, step, or
3841 leave the thread stopped. Any signal the client requested to be
3842 delivered has already been enqueued at this point.
3843
3844 If any thread that GDB wants running is stopped at an internal
3845 breakpoint that needs stepping over, we start a step-over operation
3846 on that particular thread, and leave all others stopped. */
3847
3848 static int
3849 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3850 {
3851 struct lwp_info *lwp = (struct lwp_info *) entry;
3852 struct thread_info *thread;
3853 int step;
3854
3855 if (lwp == except)
3856 return 0;
3857
3858 if (debug_threads)
3859 fprintf (stderr,
3860 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3861
3862 if (!lwp->stopped)
3863 {
3864 if (debug_threads)
3865 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3866 return 0;
3867 }
3868
3869 thread = get_lwp_thread (lwp);
3870
3871 if (thread->last_resume_kind == resume_stop
3872 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3873 {
3874 if (debug_threads)
3875 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3876 lwpid_of (lwp));
3877 return 0;
3878 }
3879
3880 if (lwp->status_pending_p)
3881 {
3882 if (debug_threads)
3883 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3884 lwpid_of (lwp));
3885 return 0;
3886 }
3887
3888 gdb_assert (lwp->suspended >= 0);
3889
3890 if (lwp->suspended)
3891 {
3892 if (debug_threads)
3893 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3894 return 0;
3895 }
3896
3897 if (thread->last_resume_kind == resume_stop
3898 && lwp->pending_signals_to_report == NULL
3899 && lwp->collecting_fast_tracepoint == 0)
3900 {
3901 /* We haven't reported this LWP as stopped yet (otherwise, the
3902 last_status.kind check above would catch it, and we wouldn't
3903 reach here). This LWP may have been momentarily paused by a
3904 stop_all_lwps call while handling, for example, another LWP's
3905 step-over. In that case, the pending expected SIGSTOP signal
3906 that was queued at vCont;t handling time will have already
3907 been consumed by wait_for_sigstop, and so we need to requeue
3908 another one here. Note that if the LWP already has a SIGSTOP
3909 pending, this is a no-op. */
3910
3911 if (debug_threads)
3912 fprintf (stderr,
3913 "Client wants LWP %ld to stop. "
3914 "Making sure it has a SIGSTOP pending\n",
3915 lwpid_of (lwp));
3916
3917 send_sigstop (lwp);
3918 }
3919
3920 step = thread->last_resume_kind == resume_step;
3921 linux_resume_one_lwp (lwp, step, 0, NULL);
3922 return 0;
3923 }
3924
3925 static int
3926 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3927 {
3928 struct lwp_info *lwp = (struct lwp_info *) entry;
3929
3930 if (lwp == except)
3931 return 0;
3932
3933 lwp->suspended--;
3934 gdb_assert (lwp->suspended >= 0);
3935
3936 return proceed_one_lwp (entry, except);
3937 }
3938
3939 /* When we finish a step-over, set threads running again. If there's
3940 another thread that may need a step-over, now's the time to start
3941 it. Eventually, we'll move all threads past their breakpoints. */
3942
3943 static void
3944 proceed_all_lwps (void)
3945 {
3946 struct lwp_info *need_step_over;
3947
3948 /* If there is a thread which would otherwise be resumed, which is
3949 stopped at a breakpoint that needs stepping over, then don't
3950 resume any threads - have it step over the breakpoint with all
3951 other threads stopped, then resume all threads again. */
3952
3953 if (supports_breakpoints ())
3954 {
3955 need_step_over
3956 = (struct lwp_info *) find_inferior (&all_lwps,
3957 need_step_over_p, NULL);
3958
3959 if (need_step_over != NULL)
3960 {
3961 if (debug_threads)
3962 fprintf (stderr, "proceed_all_lwps: found "
3963 "thread %ld needing a step-over\n",
3964 lwpid_of (need_step_over));
3965
3966 start_step_over (need_step_over);
3967 return;
3968 }
3969 }
3970
3971 if (debug_threads)
3972 fprintf (stderr, "Proceeding, no step-over needed\n");
3973
3974 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3975 }
3976
3977 /* Stopped LWPs that the client wanted to be running, that don't have
3978 pending statuses, are set to run again, except for EXCEPT, if not
3979 NULL. This undoes a stop_all_lwps call. */
3980
3981 static void
3982 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3983 {
3984 if (debug_threads)
3985 {
3986 if (except)
3987 fprintf (stderr,
3988 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3989 else
3990 fprintf (stderr,
3991 "unstopping all lwps\n");
3992 }
3993
3994 if (unsuspend)
3995 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3996 else
3997 find_inferior (&all_lwps, proceed_one_lwp, except);
3998 }
3999
4000
4001 #ifdef HAVE_LINUX_REGSETS
4002
4003 #define use_linux_regsets 1
4004
4005 static int
4006 regsets_fetch_inferior_registers (struct regcache *regcache)
4007 {
4008 struct regset_info *regset;
4009 int saw_general_regs = 0;
4010 int pid;
4011 struct iovec iov;
4012
4013 regset = target_regsets;
4014
4015 pid = lwpid_of (get_thread_lwp (current_inferior));
4016 while (regset->size >= 0)
4017 {
4018 void *buf, *data;
4019 int nt_type, res;
4020
4021 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4022 {
4023 regset++;
4024 continue;
4025 }
4026
4027 buf = xmalloc (regset->size);
4028
4029 nt_type = regset->nt_type;
4030 if (nt_type)
4031 {
4032 iov.iov_base = buf;
4033 iov.iov_len = regset->size;
4034 data = (void *) &iov;
4035 }
4036 else
4037 data = buf;
4038
4039 #ifndef __sparc__
4040 res = ptrace (regset->get_request, pid,
4041 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4042 #else
4043 res = ptrace (regset->get_request, pid, data, nt_type);
4044 #endif
4045 if (res < 0)
4046 {
4047 if (errno == EIO)
4048 {
4049 /* If we get EIO on a regset, do not try it again for
4050 this process. */
4051 disabled_regsets[regset - target_regsets] = 1;
4052 free (buf);
4053 continue;
4054 }
4055 else
4056 {
4057 char s[256];
4058 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4059 pid);
4060 perror (s);
4061 }
4062 }
4063 else if (regset->type == GENERAL_REGS)
4064 saw_general_regs = 1;
4065 /* Don't overwrite the regcache with garbage if the read failed. */
if (res >= 0)
regset->store_function (regcache, buf);
4066 regset++;
4067 free (buf);
4068 }
4069 if (saw_general_regs)
4070 return 0;
4071 else
4072 return 1;
4073 }
4074
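/* A minimal sketch of the iovec-based read that the loop above
   performs for regsets with a non-zero nt_type, assuming the host
   headers define PTRACE_GETREGSET.  The note type goes in the
   "address" argument and the kernel trims iov_len down to the number
   of bytes it actually wrote.  Hypothetical helper, not referenced
   elsewhere.  */

static int
fetch_one_regset_sketch (int tid, int nt_type, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  if (ptrace (PTRACE_GETREGSET, tid, (PTRACE_ARG3_TYPE) (long) nt_type,
	      &iov) < 0)
    return -1;			/* EIO means this regset is unsupported.  */
  return (int) iov.iov_len;	/* Bytes the kernel filled in.  */
}
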
4075 static int
4076 regsets_store_inferior_registers (struct regcache *regcache)
4077 {
4078 struct regset_info *regset;
4079 int saw_general_regs = 0;
4080 int pid;
4081 struct iovec iov;
4082
4083 regset = target_regsets;
4084
4085 pid = lwpid_of (get_thread_lwp (current_inferior));
4086 while (regset->size >= 0)
4087 {
4088 void *buf, *data;
4089 int nt_type, res;
4090
4091 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4092 {
4093 regset++;
4094 continue;
4095 }
4096
4097 buf = xmalloc (regset->size);
4098
4099 /* First fill the buffer with the current register set contents,
4100 in case there are any items in the kernel's regset that are
4101 not in gdbserver's regcache. */
4102
4103 nt_type = regset->nt_type;
4104 if (nt_type)
4105 {
4106 iov.iov_base = buf;
4107 iov.iov_len = regset->size;
4108 data = (void *) &iov;
4109 }
4110 else
4111 data = buf;
4112
4113 #ifndef __sparc__
4114 res = ptrace (regset->get_request, pid,
4115 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4116 #else
4117 res = ptrace (regset->get_request, pid, data, nt_type);
4118 #endif
4119
4120 if (res == 0)
4121 {
4122 /* Then overlay our cached registers on that. */
4123 regset->fill_function (regcache, buf);
4124
4125 /* Only now do we write the register set. */
4126 #ifndef __sparc__
4127 res = ptrace (regset->set_request, pid,
4128 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4129 #else
4130 res = ptrace (regset->set_request, pid, data, nt_type);
4131 #endif
4132 }
4133
4134 if (res < 0)
4135 {
4136 if (errno == EIO)
4137 {
4138 /* If we get EIO on a regset, do not try it again for
4139 this process. */
4140 disabled_regsets[regset - target_regsets] = 1;
4141 free (buf);
4142 continue;
4143 }
4144 else if (errno == ESRCH)
4145 {
4146 /* At this point, ESRCH should mean the process is
4147 already gone, in which case we simply ignore attempts
4148 to change its registers. See also the related
4149 comment in linux_resume_one_lwp. */
4150 free (buf);
4151 return 0;
4152 }
4153 else
4154 {
4155 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4156 }
4157 }
4158 else if (regset->type == GENERAL_REGS)
4159 saw_general_regs = 1;
4160 regset++;
4161 free (buf);
4162 }
4163 if (saw_general_regs)
4164 return 0;
4165 else
4166 return 1;
4167 }
4168
4169 #else /* !HAVE_LINUX_REGSETS */
4170
4171 #define use_linux_regsets 0
4172 #define regsets_fetch_inferior_registers(regcache) 1
4173 #define regsets_store_inferior_registers(regcache) 1
4174
4175 #endif
4176
4177 /* Return 1 if register REGNO is supported by one of the regset ptrace
4178 calls or 0 if it has to be transferred individually. */
4179
4180 static int
4181 linux_register_in_regsets (int regno)
4182 {
4183 unsigned char mask = 1 << (regno % 8);
4184 size_t index = regno / 8;
4185
4186 return (use_linux_regsets
4187 && (the_low_target.regset_bitmap == NULL
4188 || (the_low_target.regset_bitmap[index] & mask) != 0));
4189 }
4190
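/* Worked example of the bitmap test above: for regno == 10, index is
   10 / 8 == 1 and mask is 1 << (10 % 8) == 0x04, so register 10 is
   regset-transferred iff bit 2 of bitmap[1] is set.  A sketch with a
   hypothetical local bitmap that marks registers 0..15; regno is
   assumed to stay within the bitmap.  */

static int
in_regsets_sketch (int regno)
{
  /* Two full bytes set: registers 0..15 are covered by regsets.  */
  static const unsigned char bitmap[] = { 0xff, 0xff, 0x00 };

  return (bitmap[regno / 8] & (1 << (regno % 8))) != 0;
}
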
4191 #ifdef HAVE_LINUX_USRREGS
4192
4193 int
4194 register_addr (int regnum)
4195 {
4196 int addr;
4197
4198 if (regnum < 0 || regnum >= the_low_target.num_regs)
4199 error ("Invalid register number %d.", regnum);
4200
4201 addr = the_low_target.regmap[regnum];
4202
4203 return addr;
4204 }
4205
4206 /* Fetch one register. */
4207 static void
4208 fetch_register (struct regcache *regcache, int regno)
4209 {
4210 CORE_ADDR regaddr;
4211 int i, size;
4212 char *buf;
4213 int pid;
4214
4215 if (regno >= the_low_target.num_regs)
4216 return;
4217 if ((*the_low_target.cannot_fetch_register) (regno))
4218 return;
4219
4220 regaddr = register_addr (regno);
4221 if (regaddr == -1)
4222 return;
4223
4224 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4225 & -sizeof (PTRACE_XFER_TYPE));
4226 buf = alloca (size);
4227
4228 pid = lwpid_of (get_thread_lwp (current_inferior));
4229 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4230 {
4231 errno = 0;
4232 *(PTRACE_XFER_TYPE *) (buf + i) =
4233 ptrace (PTRACE_PEEKUSER, pid,
4234 /* Coerce to a uintptr_t first to avoid potential gcc warning
4235 about coercing an 8 byte integer to a 4 byte pointer. */
4236 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, (PTRACE_ARG4_TYPE) 0);
4237 regaddr += sizeof (PTRACE_XFER_TYPE);
4238 if (errno != 0)
4239 error ("reading register %d: %s", regno, strerror (errno));
4240 }
4241
4242 if (the_low_target.supply_ptrace_register)
4243 the_low_target.supply_ptrace_register (regcache, regno, buf);
4244 else
4245 supply_register (regcache, regno, buf);
4246 }
4247
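/* The size computation above rounds register_size (regno) up to a
   whole number of PTRACE_XFER_TYPE words, since PTRACE_PEEKUSER and
   PTRACE_POKEUSER move entire words.  (n + s - 1) & -s works because
   s is a power of two: with s == 8, n == 4 yields 8 and n == 10
   yields 16.  Minimal sketch:  */

static int
round_up_to_xfer_words_sketch (int n)
{
  return (n + sizeof (PTRACE_XFER_TYPE) - 1) & -sizeof (PTRACE_XFER_TYPE);
}
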
4248 /* Store one register. */
4249 static void
4250 store_register (struct regcache *regcache, int regno)
4251 {
4252 CORE_ADDR regaddr;
4253 int i, size;
4254 char *buf;
4255 int pid;
4256
4257 if (regno >= the_low_target.num_regs)
4258 return;
4259 if ((*the_low_target.cannot_store_register) (regno))
4260 return;
4261
4262 regaddr = register_addr (regno);
4263 if (regaddr == -1)
4264 return;
4265
4266 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4267 & -sizeof (PTRACE_XFER_TYPE));
4268 buf = alloca (size);
4269 memset (buf, 0, size);
4270
4271 if (the_low_target.collect_ptrace_register)
4272 the_low_target.collect_ptrace_register (regcache, regno, buf);
4273 else
4274 collect_register (regcache, regno, buf);
4275
4276 pid = lwpid_of (get_thread_lwp (current_inferior));
4277 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4278 {
4279 errno = 0;
4280 ptrace (PTRACE_POKEUSER, pid,
4281 /* Coerce to a uintptr_t first to avoid potential gcc warning
4282 about coercing an 8 byte integer to a 4 byte pointer. */
4283 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4284 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4285 if (errno != 0)
4286 {
4287 /* At this point, ESRCH should mean the process is
4288 already gone, in which case we simply ignore attempts
4289 to change its registers. See also the related
4290 comment in linux_resume_one_lwp. */
4291 if (errno == ESRCH)
4292 return;
4293
4294 if ((*the_low_target.cannot_store_register) (regno) == 0)
4295 error ("writing register %d: %s", regno, strerror (errno));
4296 }
4297 regaddr += sizeof (PTRACE_XFER_TYPE);
4298 }
4299 }
4300
4301 /* Fetch all registers, or just one, from the child process.
4302 If REGNO is -1, do this for all registers, skipping any that are
4303 assumed to have been retrieved by regsets_fetch_inferior_registers,
4304 unless ALL is non-zero.
4305 Otherwise, REGNO specifies which register (so we can save time). */
4306 static void
4307 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4308 {
4309 if (regno == -1)
4310 {
4311 for (regno = 0; regno < the_low_target.num_regs; regno++)
4312 if (all || !linux_register_in_regsets (regno))
4313 fetch_register (regcache, regno);
4314 }
4315 else
4316 fetch_register (regcache, regno);
4317 }
4318
4319 /* Store our register values back into the inferior.
4320 If REGNO is -1, do this for all registers, skipping any that are
4321 assumed to have been saved by regsets_store_inferior_registers,
4322 unless ALL is non-zero.
4323 Otherwise, REGNO specifies which register (so we can save time). */
4324 static void
4325 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4326 {
4327 if (regno == -1)
4328 {
4329 for (regno = 0; regno < the_low_target.num_regs; regno++)
4330 if (all || !linux_register_in_regsets (regno))
4331 store_register (regcache, regno);
4332 }
4333 else
4334 store_register (regcache, regno);
4335 }
4336
4337 #else /* !HAVE_LINUX_USRREGS */
4338
4339 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4340 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4341
4342 #endif
4343
4344
4345 void
4346 linux_fetch_registers (struct regcache *regcache, int regno)
4347 {
4348 int use_regsets;
4349 int all = 0;
4350
4351 if (regno == -1)
4352 {
4353 if (the_low_target.fetch_register != NULL)
4354 for (regno = 0; regno < the_low_target.num_regs; regno++)
4355 (*the_low_target.fetch_register) (regcache, regno);
4356
4357 all = regsets_fetch_inferior_registers (regcache);
4358 usr_fetch_inferior_registers (regcache, -1, all);
4359 }
4360 else
4361 {
4362 if (the_low_target.fetch_register != NULL
4363 && (*the_low_target.fetch_register) (regcache, regno))
4364 return;
4365
4366 use_regsets = linux_register_in_regsets (regno);
4367 if (use_regsets)
4368 all = regsets_fetch_inferior_registers (regcache);
4369 if (!use_regsets || all)
4370 usr_fetch_inferior_registers (regcache, regno, 1);
4371 }
4372 }
4373
4374 void
4375 linux_store_registers (struct regcache *regcache, int regno)
4376 {
4377 int use_regsets;
4378 int all = 0;
4379
4380 if (regno == -1)
4381 {
4382 all = regsets_store_inferior_registers (regcache);
4383 usr_store_inferior_registers (regcache, regno, all);
4384 }
4385 else
4386 {
4387 use_regsets = linux_register_in_regsets (regno);
4388 if (use_regsets)
4389 all = regsets_store_inferior_registers (regcache);
4390 if (!use_regsets || all)
4391 usr_store_inferior_registers (regcache, regno, 1);
4392 }
4393 }
4394
4395
4396 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4397 to debugger memory starting at MYADDR. */
4398
4399 static int
4400 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4401 {
4402 int pid = lwpid_of (get_thread_lwp (current_inferior));
4403 register PTRACE_XFER_TYPE *buffer;
4404 register CORE_ADDR addr;
4405 register int count;
4406 char filename[64];
4407 register int i;
4408 int ret;
4409 int fd;
4410
4411 /* Try using /proc. Don't bother for one word. */
4412 if (len >= 3 * sizeof (long))
4413 {
4414 int bytes;
4415
4416 /* We could keep this file open and cache it - possibly one per
4417 thread. That requires some juggling, but is even faster. */
4418 sprintf (filename, "/proc/%d/mem", pid);
4419 fd = open (filename, O_RDONLY | O_LARGEFILE);
4420 if (fd == -1)
4421 goto no_proc;
4422
4423 /* If pread64 is available, use it. It's faster if the kernel
4424 supports it (only one syscall), and it's 64-bit safe even on
4425 32-bit platforms (for instance, SPARC debugging a SPARC64
4426 application). */
4427 #ifdef HAVE_PREAD64
4428 bytes = pread64 (fd, myaddr, len, memaddr);
4429 #else
4430 bytes = -1;
4431 if (lseek (fd, memaddr, SEEK_SET) != -1)
4432 bytes = read (fd, myaddr, len);
4433 #endif
4434
4435 close (fd);
4436 if (bytes == len)
4437 return 0;
4438
4439 /* Some data was read, we'll try to get the rest with ptrace. */
4440 if (bytes > 0)
4441 {
4442 memaddr += bytes;
4443 myaddr += bytes;
4444 len -= bytes;
4445 }
4446 }
4447
4448 no_proc:
4449 /* Round starting address down to longword boundary. */
4450 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4451 /* Round ending address up; get number of longwords that makes. */
4452 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4453 / sizeof (PTRACE_XFER_TYPE));
4454 /* Allocate buffer of that many longwords. */
4455 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4456
4457 /* Read all the longwords. */
4458 errno = 0;
4459 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4460 {
4461 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4462 about coercing an 8 byte integer to a 4 byte pointer. */
4463 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4464 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4465 (PTRACE_ARG4_TYPE) 0);
4466 if (errno)
4467 break;
4468 }
4469 ret = errno;
4470
4471 /* Copy appropriate bytes out of the buffer. */
4472 if (i > 0)
4473 {
4474 i *= sizeof (PTRACE_XFER_TYPE);
4475 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4476 memcpy (myaddr,
4477 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4478 i < len ? i : len);
4479 }
4480
4481 return ret;
4482 }
4483
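/* A sketch of the /proc/PID/mem fast path used above, assuming the
   tracee is ptrace-stopped (the kernel refuses such reads from
   processes we are not attached to).  Returns the number of bytes
   read, or -1 on failure.  Hypothetical helper, illustrative only.  */

static ssize_t
proc_mem_read_sketch (int pid, CORE_ADDR addr, void *buf, size_t len)
{
  char filename[64];
  ssize_t bytes;
  int fd;

  xsnprintf (filename, sizeof filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

#ifdef HAVE_PREAD64
  bytes = pread64 (fd, buf, len, addr);
#else
  bytes = -1;
  if (lseek (fd, addr, SEEK_SET) != -1)
    bytes = read (fd, buf, len);
#endif

  close (fd);
  return bytes;
}
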
4484 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4485 memory at MEMADDR. On failure (cannot write to the inferior)
4486 returns the value of errno. Always succeeds if LEN is zero. */
4487
4488 static int
4489 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4490 {
4491 register int i;
4492 /* Round starting address down to longword boundary. */
4493 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4494 /* Round ending address up; get number of longwords that makes. */
4495 register int count
4496 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4497 / sizeof (PTRACE_XFER_TYPE);
4498
4499 /* Allocate buffer of that many longwords. */
4500 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4501 alloca (count * sizeof (PTRACE_XFER_TYPE));
4502
4503 int pid = lwpid_of (get_thread_lwp (current_inferior));
4504
4505 if (len == 0)
4506 {
4507 /* Zero length write always succeeds. */
4508 return 0;
4509 }
4510
4511 if (debug_threads)
4512 {
4513 /* Dump up to four bytes. */
4514 unsigned int val = * (unsigned int *) myaddr;
4515 if (len == 1)
4516 val = val & 0xff;
4517 else if (len == 2)
4518 val = val & 0xffff;
4519 else if (len == 3)
4520 val = val & 0xffffff;
4521 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4522 val, (long)memaddr);
4523 }
4524
4525 /* Fill start and end extra bytes of buffer with existing memory data. */
4526
4527 errno = 0;
4528 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4529 about coercing an 8 byte integer to a 4 byte pointer. */
4530 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4531 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4532 (PTRACE_ARG4_TYPE) 0);
4533 if (errno)
4534 return errno;
4535
4536 if (count > 1)
4537 {
4538 errno = 0;
4539 buffer[count - 1]
4540 = ptrace (PTRACE_PEEKTEXT, pid,
4541 /* Coerce to a uintptr_t first to avoid potential gcc warning
4542 about coercing an 8 byte integer to a 4 byte pointer. */
4543 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4544 * sizeof (PTRACE_XFER_TYPE)),
4545 (PTRACE_ARG4_TYPE) 0);
4546 if (errno)
4547 return errno;
4548 }
4549
4550 /* Copy data to be written over corresponding part of buffer. */
4551
4552 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4553 myaddr, len);
4554
4555 /* Write the entire buffer. */
4556
4557 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4558 {
4559 errno = 0;
4560 ptrace (PTRACE_POKETEXT, pid,
4561 /* Coerce to a uintptr_t first to avoid potential gcc warning
4562 about coercing an 8 byte integer to a 4 byte pointer. */
4563 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4564 (PTRACE_ARG4_TYPE) buffer[i]);
4565 if (errno)
4566 return errno;
4567 }
4568
4569 return 0;
4570 }
4571
4572 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
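/* Minimal sketch of the read-modify-write pattern above, reduced to
   a single byte: PTRACE_POKETEXT writes a whole word, so the
   enclosing word is peeked first and only the target byte is
   changed.  Assumes a ptrace-stopped tracee; hypothetical helper.  */

static int
poke_byte_sketch (int pid, unsigned long addr, unsigned char byte)
{
  unsigned long word_addr = addr & -(unsigned long) sizeof (long);
  union { long word; unsigned char bytes[sizeof (long)]; } u;

  errno = 0;
  u.word = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) word_addr,
		   (PTRACE_ARG4_TYPE) 0);
  if (errno != 0)
    return errno;

  u.bytes[addr - word_addr] = byte;
  errno = 0;
  ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) word_addr,
	  (PTRACE_ARG4_TYPE) u.word);
  return errno;
}
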
4573 static int linux_supports_tracefork_flag;
4574
4575 static void
4576 linux_enable_event_reporting (int pid)
4577 {
4578 if (!linux_supports_tracefork_flag)
4579 return;
4580
4581 ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_ARG3_TYPE) 0,
4582 (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4583 }
4584
4585 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4586
4587 static int
4588 linux_tracefork_grandchild (void *arg)
4589 {
4590 _exit (0);
4591 }
4592
4593 #define STACK_SIZE 4096
4594
4595 static int
4596 linux_tracefork_child (void *arg)
4597 {
4598 ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
4599 kill (getpid (), SIGSTOP);
4600
4601 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4602
4603 if (fork () == 0)
4604 linux_tracefork_grandchild (NULL);
4605
4606 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4607
4608 #ifdef __ia64__
4609 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4610 CLONE_VM | SIGCHLD, NULL);
4611 #else
4612 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4613 CLONE_VM | SIGCHLD, NULL);
4614 #endif
4615
4616 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4617
4618 _exit (0);
4619 }
4620
4621 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4622 sure that we can enable the option, and that it had the desired
4623 effect. */
4624
4625 static void
4626 linux_test_for_tracefork (void)
4627 {
4628 int child_pid, ret, status;
4629 long second_pid;
4630 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4631 char *stack = xmalloc (STACK_SIZE * 4);
4632 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4633
4634 linux_supports_tracefork_flag = 0;
4635
4636 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4637
4638 child_pid = fork ();
4639 if (child_pid == 0)
4640 linux_tracefork_child (NULL);
4641
4642 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4643
4644 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4645 #ifdef __ia64__
4646 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4647 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4648 #else /* !__ia64__ */
4649 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4650 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4651 #endif /* !__ia64__ */
4652
4653 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4654
4655 if (child_pid == -1)
4656 perror_with_name ("clone");
4657
4658 ret = my_waitpid (child_pid, &status, 0);
4659 if (ret == -1)
4660 perror_with_name ("waitpid");
4661 else if (ret != child_pid)
4662 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4663 if (! WIFSTOPPED (status))
4664 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4665
4666 ret = ptrace (PTRACE_SETOPTIONS, child_pid, (PTRACE_ARG3_TYPE) 0,
4667 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4668 if (ret != 0)
4669 {
4670 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4671 (PTRACE_ARG4_TYPE) 0);
4672 if (ret != 0)
4673 {
4674 warning ("linux_test_for_tracefork: failed to kill child");
4675 return;
4676 }
4677
4678 ret = my_waitpid (child_pid, &status, 0);
4679 if (ret != child_pid)
4680 warning ("linux_test_for_tracefork: failed to wait for killed child");
4681 else if (!WIFSIGNALED (status))
4682 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4683 "killed child", status);
4684
4685 return;
4686 }
4687
4688 ret = ptrace (PTRACE_CONT, child_pid, (PTRACE_ARG3_TYPE) 0,
4689 (PTRACE_ARG4_TYPE) 0);
4690 if (ret != 0)
4691 warning ("linux_test_for_tracefork: failed to resume child");
4692
4693 ret = my_waitpid (child_pid, &status, 0);
4694
4695 if (ret == child_pid && WIFSTOPPED (status)
4696 && status >> 16 == PTRACE_EVENT_FORK)
4697 {
4698 second_pid = 0;
4699 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, (PTRACE_ARG3_TYPE) 0,
4700 &second_pid);
4701 if (ret == 0 && second_pid != 0)
4702 {
4703 int second_status;
4704
4705 linux_supports_tracefork_flag = 1;
4706 my_waitpid (second_pid, &second_status, 0);
4707 ret = ptrace (PTRACE_KILL, second_pid, (PTRACE_ARG3_TYPE) 0,
4708 (PTRACE_ARG4_TYPE) 0);
4709 if (ret != 0)
4710 warning ("linux_test_for_tracefork: failed to kill second child");
4711 my_waitpid (second_pid, &status, 0);
4712 }
4713 }
4714 else
4715 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4716 "(%d, status 0x%x)", ret, status);
4717
4718 do
4719 {
4720 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4721 (PTRACE_ARG4_TYPE) 0);
4722 if (ret != 0)
4723 warning ("linux_test_for_tracefork: failed to kill child");
4724 my_waitpid (child_pid, &status, 0);
4725 }
4726 while (WIFSTOPPED (status));
4727
4728 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4729 free (stack);
4730 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4731 }
4732
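/* Sketch of decoding the extended stop consumed above: with
   PTRACE_O_TRACEFORK in effect, the forking tracee stops with
   SIGTRAP and the PTRACE_EVENT_* code packed above bit 16 of the
   wait status; PTRACE_GETEVENTMSG then yields the new child's pid.
   Hypothetical helper; STATUS is assumed to come from waitpid on a
   traced process.  */

static long
traced_fork_child_sketch (int pid, int status)
{
  long new_pid = 0;

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_FORK)
    ptrace (PTRACE_GETEVENTMSG, pid, (PTRACE_ARG3_TYPE) 0, &new_pid);

  return new_pid;
}
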
4733
4734 static void
4735 linux_look_up_symbols (void)
4736 {
4737 #ifdef USE_THREAD_DB
4738 struct process_info *proc = current_process ();
4739
4740 if (proc->private->thread_db != NULL)
4741 return;
4742
4743 /* If the kernel supports tracing forks, then it also supports tracing
4744 clones, and in that case we don't need to use the magic thread event
4745 breakpoint to learn about threads. */
4746 thread_db_init (!linux_supports_tracefork_flag);
4747 #endif
4748 }
4749
4750 static void
4751 linux_request_interrupt (void)
4752 {
4753 extern unsigned long signal_pid;
4754
4755 if (!ptid_equal (cont_thread, null_ptid)
4756 && !ptid_equal (cont_thread, minus_one_ptid))
4757 {
4758 struct lwp_info *lwp;
4759 int lwpid;
4760
4761 lwp = get_thread_lwp (current_inferior);
4762 lwpid = lwpid_of (lwp);
4763 kill_lwp (lwpid, SIGINT);
4764 }
4765 else
4766 kill_lwp (signal_pid, SIGINT);
4767 }
4768
4769 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4770 to debugger memory starting at MYADDR. */
4771
4772 static int
4773 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4774 {
4775 char filename[PATH_MAX];
4776 int fd, n;
4777 int pid = lwpid_of (get_thread_lwp (current_inferior));
4778
4779 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4780
4781 fd = open (filename, O_RDONLY);
4782 if (fd < 0)
4783 return -1;
4784
4785 if (offset != (CORE_ADDR) 0
4786 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4787 n = -1;
4788 else
4789 n = read (fd, myaddr, len);
4790
4791 close (fd);
4792
4793 return n;
4794 }
4795
4796 /* These breakpoint- and watchpoint-related wrapper functions simply
4797 pass on the function call if the target has registered a
4798 corresponding function. */
4799
4800 static int
4801 linux_insert_point (char type, CORE_ADDR addr, int len)
4802 {
4803 if (the_low_target.insert_point != NULL)
4804 return the_low_target.insert_point (type, addr, len);
4805 else
4806 /* Unsupported (see target.h). */
4807 return 1;
4808 }
4809
4810 static int
4811 linux_remove_point (char type, CORE_ADDR addr, int len)
4812 {
4813 if (the_low_target.remove_point != NULL)
4814 return the_low_target.remove_point (type, addr, len);
4815 else
4816 /* Unsupported (see target.h). */
4817 return 1;
4818 }
4819
4820 static int
4821 linux_stopped_by_watchpoint (void)
4822 {
4823 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4824
4825 return lwp->stopped_by_watchpoint;
4826 }
4827
4828 static CORE_ADDR
4829 linux_stopped_data_address (void)
4830 {
4831 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4832
4833 return lwp->stopped_data_address;
4834 }
4835
4836 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4837 #if ! (defined(PT_TEXT_ADDR) \
4838 || defined(PT_DATA_ADDR) \
4839 || defined(PT_TEXT_END_ADDR))
4840 #if defined(__mcoldfire__)
4841 /* These should really be defined in the kernel's ptrace.h header. */
4842 #define PT_TEXT_ADDR 49*4
4843 #define PT_DATA_ADDR 50*4
4844 #define PT_TEXT_END_ADDR 51*4
4845 #elif defined(BFIN)
4846 #define PT_TEXT_ADDR 220
4847 #define PT_TEXT_END_ADDR 224
4848 #define PT_DATA_ADDR 228
4849 #elif defined(__TMS320C6X__)
4850 #define PT_TEXT_ADDR (0x10000*4)
4851 #define PT_DATA_ADDR (0x10004*4)
4852 #define PT_TEXT_END_ADDR (0x10008*4)
4853 #endif
4854 #endif
4855
4856 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4857 to tell gdb about. */
4858
4859 static int
4860 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4861 {
4862 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4863 unsigned long text, text_end, data;
4864 int pid = lwpid_of (get_thread_lwp (current_inferior));
4865
4866 errno = 0;
4867
4868 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_ADDR,
4869 (PTRACE_ARG4_TYPE) 0);
4870 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_END_ADDR,
4871 (PTRACE_ARG4_TYPE) 0);
4872 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_DATA_ADDR,
4873 (PTRACE_ARG4_TYPE) 0);
4874
4875 if (errno == 0)
4876 {
4877 /* Both text and data offsets produced at compile-time (and so
4878 used by gdb) are relative to the beginning of the program,
4879 with the data segment immediately following the text segment.
4880 However, the actual runtime layout in memory may put the data
4881 somewhere else, so when we send gdb a data base-address, we
4882 use the real data base address and subtract the compile-time
4883 data base-address from it (which is just the length of the
4884 text segment). BSS immediately follows data in both
4885 cases. */
4886 *text_p = text;
4887 *data_p = data - (text_end - text);
4888
4889 return 1;
4890 }
4891 #endif
4892 return 0;
4893 }
4894 #endif
4895
4896 static int
4897 linux_qxfer_osdata (const char *annex,
4898 unsigned char *readbuf, unsigned const char *writebuf,
4899 CORE_ADDR offset, int len)
4900 {
4901 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4902 }
4903
4904 /* Convert a native/host siginfo object into/from the siginfo in the
4905 layout of the inferior's architecture. */
4906
4907 static void
4908 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4909 {
4910 int done = 0;
4911
4912 if (the_low_target.siginfo_fixup != NULL)
4913 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4914
4915 /* If there was no callback, or the callback didn't do anything,
4916 then just do a straight memcpy. */
4917 if (!done)
4918 {
4919 if (direction == 1)
4920 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4921 else
4922 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4923 }
4924 }
4925
4926 static int
4927 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4928 unsigned const char *writebuf, CORE_ADDR offset, int len)
4929 {
4930 int pid;
4931 siginfo_t siginfo;
4932 char inf_siginfo[sizeof (siginfo_t)];
4933
4934 if (current_inferior == NULL)
4935 return -1;
4936
4937 pid = lwpid_of (get_thread_lwp (current_inferior));
4938
4939 if (debug_threads)
4940 fprintf (stderr, "%s siginfo for lwp %d.\n",
4941 readbuf != NULL ? "Reading" : "Writing",
4942 pid);
4943
4944 if (offset >= sizeof (siginfo))
4945 return -1;
4946
4947 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
4948 return -1;
4949
4950 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4951 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4952 inferior with a 64-bit GDBSERVER should look the same as debugging it
4953 with a 32-bit GDBSERVER, we need to convert it. */
4954 siginfo_fixup (&siginfo, inf_siginfo, 0);
4955
4956 if (offset + len > sizeof (siginfo))
4957 len = sizeof (siginfo) - offset;
4958
4959 if (readbuf != NULL)
4960 memcpy (readbuf, inf_siginfo + offset, len);
4961 else
4962 {
4963 memcpy (inf_siginfo + offset, writebuf, len);
4964
4965 /* Convert back to ptrace layout before flushing it out. */
4966 siginfo_fixup (&siginfo, inf_siginfo, 1);
4967
4968 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
4969 return -1;
4970 }
4971
4972 return len;
4973 }
4974
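/* The offset/len handling above follows the usual partial-transfer
   contract: an offset at or past the end of the object is an error,
   and LEN is shortened so that OFFSET + LEN stays inside the object.
   Minimal sketch of just that clamping:  */

static int
clamp_xfer_len_sketch (unsigned int obj_size, unsigned int offset,
		       unsigned int len)
{
  if (offset >= obj_size)
    return -1;
  if (offset + len > obj_size)
    len = obj_size - offset;
  return len;
}
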
4975 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4976 it lets us notice when children change state; and it acts as the
4977 handler for the sigsuspend in my_waitpid. */
4978
4979 static void
4980 sigchld_handler (int signo)
4981 {
4982 int old_errno = errno;
4983
4984 if (debug_threads)
4985 {
4986 do
4987 {
4988 /* fprintf is not async-signal-safe, so call write
4989 directly. */
4990 if (write (2, "sigchld_handler\n",
4991 sizeof ("sigchld_handler\n") - 1) < 0)
4992 break; /* just ignore */
4993 } while (0);
4994 }
4995
4996 if (target_is_async_p ())
4997 async_file_mark (); /* trigger a linux_wait */
4998
4999 errno = old_errno;
5000 }
5001
5002 static int
5003 linux_supports_non_stop (void)
5004 {
5005 return 1;
5006 }
5007
5008 static int
5009 linux_async (int enable)
5010 {
5011 int previous = (linux_event_pipe[0] != -1);
5012
5013 if (debug_threads)
5014 fprintf (stderr, "linux_async (%d), previous=%d\n",
5015 enable, previous);
5016
5017 if (previous != enable)
5018 {
5019 sigset_t mask;
5020 sigemptyset (&mask);
5021 sigaddset (&mask, SIGCHLD);
5022
5023 sigprocmask (SIG_BLOCK, &mask, NULL);
5024
5025 if (enable)
5026 {
5027 if (pipe (linux_event_pipe) == -1)
5028 fatal ("creating event pipe failed.");
5029
5030 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5031 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5032
5033 /* Register the event loop handler. */
5034 add_file_handler (linux_event_pipe[0],
5035 handle_target_event, NULL);
5036
5037 /* Always trigger a linux_wait. */
5038 async_file_mark ();
5039 }
5040 else
5041 {
5042 delete_file_handler (linux_event_pipe[0]);
5043
5044 close (linux_event_pipe[0]);
5045 close (linux_event_pipe[1]);
5046 linux_event_pipe[0] = -1;
5047 linux_event_pipe[1] = -1;
5048 }
5049
5050 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5051 }
5052
5053 return previous;
5054 }
5055
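/* The event pipe managed above is the classic self-pipe trick: the
   async-signal-safe side (the SIGCHLD handler, via async_file_mark)
   writes a byte, and the event loop wakes up when the read end
   becomes readable.  A sketch of the two halves, using a
   hypothetical pipe_fds_sketch[2] created with pipe () and set
   O_NONBLOCK on both ends:  */

static int pipe_fds_sketch[2];

/* Signal-handler side: only async-signal-safe calls.  */

static void
mark_event_sketch (void)
{
  int save_errno = errno;

  /* If the pipe is already full, a wakeup is pending, so a failed
     write can be ignored.  */
  if (write (pipe_fds_sketch[1], "+", 1) < 0)
    ;
  errno = save_errno;
}

/* Event-loop side: drain the pipe before handling events.  */

static void
drain_events_sketch (void)
{
  char buf[64];

  while (read (pipe_fds_sketch[0], buf, sizeof buf) > 0)
    continue;	/* O_NONBLOCK makes this stop at EAGAIN.  */
}
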
5056 static int
5057 linux_start_non_stop (int nonstop)
5058 {
5059 /* Register or unregister from event-loop accordingly. */
5060 linux_async (nonstop);
5061 return 0;
5062 }
5063
5064 static int
5065 linux_supports_multi_process (void)
5066 {
5067 return 1;
5068 }
5069
5070 static int
5071 linux_supports_disable_randomization (void)
5072 {
5073 #ifdef HAVE_PERSONALITY
5074 return 1;
5075 #else
5076 return 0;
5077 #endif
5078 }
5079
5080 static int
5081 linux_supports_agent (void)
5082 {
5083 return 1;
5084 }
5085
5086 /* Enumerate spufs IDs for process PID. */
5087 static int
5088 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5089 {
5090 int pos = 0;
5091 int written = 0;
5092 char path[128];
5093 DIR *dir;
5094 struct dirent *entry;
5095
5096 sprintf (path, "/proc/%ld/fd", pid);
5097 dir = opendir (path);
5098 if (!dir)
5099 return -1;
5100
5101 rewinddir (dir);
5102 while ((entry = readdir (dir)) != NULL)
5103 {
5104 struct stat st;
5105 struct statfs stfs;
5106 int fd;
5107
5108 fd = atoi (entry->d_name);
5109 if (!fd)
5110 continue;
5111
5112 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5113 if (stat (path, &st) != 0)
5114 continue;
5115 if (!S_ISDIR (st.st_mode))
5116 continue;
5117
5118 if (statfs (path, &stfs) != 0)
5119 continue;
5120 if (stfs.f_type != SPUFS_MAGIC)
5121 continue;
5122
5123 if (pos >= offset && pos + 4 <= offset + len)
5124 {
5125 *(unsigned int *)(buf + pos - offset) = fd;
5126 written += 4;
5127 }
5128 pos += 4;
5129 }
5130
5131 closedir (dir);
5132 return written;
5133 }
5134
5135 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5136 object type, using the /proc file system. */
5137 static int
5138 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5139 unsigned const char *writebuf,
5140 CORE_ADDR offset, int len)
5141 {
5142 long pid = lwpid_of (get_thread_lwp (current_inferior));
5143 char buf[128];
5144 int fd = 0;
5145 int ret = 0;
5146
5147 if (!writebuf && !readbuf)
5148 return -1;
5149
5150 if (!*annex)
5151 {
5152 if (!readbuf)
5153 return -1;
5154 else
5155 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5156 }
5157
5158 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5159 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5160 if (fd <= 0)
5161 return -1;
5162
5163 if (offset != 0
5164 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5165 {
5166 close (fd);
5167 return 0;
5168 }
5169
5170 if (writebuf)
5171 ret = write (fd, writebuf, (size_t) len);
5172 else
5173 ret = read (fd, readbuf, (size_t) len);
5174
5175 close (fd);
5176 return ret;
5177 }
5178
5179 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5180 struct target_loadseg
5181 {
5182 /* Core address to which the segment is mapped. */
5183 Elf32_Addr addr;
5184 /* VMA recorded in the program header. */
5185 Elf32_Addr p_vaddr;
5186 /* Size of this segment in memory. */
5187 Elf32_Word p_memsz;
5188 };
5189
5190 # if defined PT_GETDSBT
5191 struct target_loadmap
5192 {
5193 /* Protocol version number, must be zero. */
5194 Elf32_Word version;
5195 /* Pointer to the DSBT table, its size, and the DSBT index. */
5196 unsigned *dsbt_table;
5197 unsigned dsbt_size, dsbt_index;
5198 /* Number of segments in this map. */
5199 Elf32_Word nsegs;
5200 /* The actual memory map. */
5201 struct target_loadseg segs[/*nsegs*/];
5202 };
5203 # define LINUX_LOADMAP PT_GETDSBT
5204 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5205 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5206 # else
5207 struct target_loadmap
5208 {
5209 /* Protocol version number, must be zero. */
5210 Elf32_Half version;
5211 /* Number of segments in this map. */
5212 Elf32_Half nsegs;
5213 /* The actual memory map. */
5214 struct target_loadseg segs[/*nsegs*/];
5215 };
5216 # define LINUX_LOADMAP PTRACE_GETFDPIC
5217 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5218 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5219 # endif
5220
5221 static int
5222 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5223 unsigned char *myaddr, unsigned int len)
5224 {
5225 int pid = lwpid_of (get_thread_lwp (current_inferior));
5226 int addr = -1;
5227 struct target_loadmap *data = NULL;
5228 unsigned int actual_length, copy_length;
5229
5230 if (strcmp (annex, "exec") == 0)
5231 addr = (int) LINUX_LOADMAP_EXEC;
5232 else if (strcmp (annex, "interp") == 0)
5233 addr = (int) LINUX_LOADMAP_INTERP;
5234 else
5235 return -1;
5236
5237 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5238 return -1;
5239
5240 if (data == NULL)
5241 return -1;
5242
5243 actual_length = sizeof (struct target_loadmap)
5244 + sizeof (struct target_loadseg) * data->nsegs;
5245
5246 if (offset < 0 || offset > actual_length)
5247 return -1;
5248
5249 copy_length = actual_length - offset < len ? actual_length - offset : len;
5250 memcpy (myaddr, (char *) data + offset, copy_length);
5251 return copy_length;
5252 }
5253 #else
5254 # define linux_read_loadmap NULL
5255 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5256
5257 static void
5258 linux_process_qsupported (const char *query)
5259 {
5260 if (the_low_target.process_qsupported != NULL)
5261 the_low_target.process_qsupported (query);
5262 }
5263
5264 static int
5265 linux_supports_tracepoints (void)
5266 {
5267 if (the_low_target.supports_tracepoints == NULL)
5268 return 0;
5269
5270 return (*the_low_target.supports_tracepoints) ();
5271 }
5272
5273 static CORE_ADDR
5274 linux_read_pc (struct regcache *regcache)
5275 {
5276 if (the_low_target.get_pc == NULL)
5277 return 0;
5278
5279 return (*the_low_target.get_pc) (regcache);
5280 }
5281
5282 static void
5283 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5284 {
5285 gdb_assert (the_low_target.set_pc != NULL);
5286
5287 (*the_low_target.set_pc) (regcache, pc);
5288 }
5289
5290 static int
5291 linux_thread_stopped (struct thread_info *thread)
5292 {
5293 return get_thread_lwp (thread)->stopped;
5294 }
5295
5296 /* This exposes stop-all-threads functionality to other modules. */
5297
5298 static void
5299 linux_pause_all (int freeze)
5300 {
5301 stop_all_lwps (freeze, NULL);
5302 }
5303
5304 /* This exposes unstop-all-threads functionality to other gdbserver
5305 modules. */
5306
5307 static void
5308 linux_unpause_all (int unfreeze)
5309 {
5310 unstop_all_lwps (unfreeze, NULL);
5311 }
5312
5313 static int
5314 linux_prepare_to_access_memory (void)
5315 {
5316 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5317 running LWP. */
5318 if (non_stop)
5319 linux_pause_all (1);
5320 return 0;
5321 }
5322
5323 static void
5324 linux_done_accessing_memory (void)
5325 {
5326 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5327 running LWP. */
5328 if (non_stop)
5329 linux_unpause_all (1);
5330 }
5331
5332 static int
5333 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5334 CORE_ADDR collector,
5335 CORE_ADDR lockaddr,
5336 ULONGEST orig_size,
5337 CORE_ADDR *jump_entry,
5338 CORE_ADDR *trampoline,
5339 ULONGEST *trampoline_size,
5340 unsigned char *jjump_pad_insn,
5341 ULONGEST *jjump_pad_insn_size,
5342 CORE_ADDR *adjusted_insn_addr,
5343 CORE_ADDR *adjusted_insn_addr_end,
5344 char *err)
5345 {
5346 return (*the_low_target.install_fast_tracepoint_jump_pad)
5347 (tpoint, tpaddr, collector, lockaddr, orig_size,
5348 jump_entry, trampoline, trampoline_size,
5349 jjump_pad_insn, jjump_pad_insn_size,
5350 adjusted_insn_addr, adjusted_insn_addr_end,
5351 err);
5352 }
5353
5354 static struct emit_ops *
5355 linux_emit_ops (void)
5356 {
5357 if (the_low_target.emit_ops != NULL)
5358 return (*the_low_target.emit_ops) ();
5359 else
5360 return NULL;
5361 }
5362
5363 static int
5364 linux_get_min_fast_tracepoint_insn_len (void)
5365 {
5366 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5367 }
5368
5369 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5370
5371 static int
5372 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5373 CORE_ADDR *phdr_memaddr, int *num_phdr)
5374 {
5375 char filename[PATH_MAX];
5376 int fd;
5377 const int auxv_size = is_elf64
5378 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5379 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5380
5381 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5382
5383 fd = open (filename, O_RDONLY);
5384 if (fd < 0)
5385 return 1;
5386
5387 *phdr_memaddr = 0;
5388 *num_phdr = 0;
5389 while (read (fd, buf, auxv_size) == auxv_size
5390 && (*phdr_memaddr == 0 || *num_phdr == 0))
5391 {
5392 if (is_elf64)
5393 {
5394 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5395
5396 switch (aux->a_type)
5397 {
5398 case AT_PHDR:
5399 *phdr_memaddr = aux->a_un.a_val;
5400 break;
5401 case AT_PHNUM:
5402 *num_phdr = aux->a_un.a_val;
5403 break;
5404 }
5405 }
5406 else
5407 {
5408 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5409
5410 switch (aux->a_type)
5411 {
5412 case AT_PHDR:
5413 *phdr_memaddr = aux->a_un.a_val;
5414 break;
5415 case AT_PHNUM:
5416 *num_phdr = aux->a_un.a_val;
5417 break;
5418 }
5419 }
5420 }
5421
5422 close (fd);
5423
5424 if (*phdr_memaddr == 0 || *num_phdr == 0)
5425 {
5426 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5427 "phdr_memaddr = %ld, phdr_num = %d",
5428 (long) *phdr_memaddr, *num_phdr);
5429 return 2;
5430 }
5431
5432 return 0;
5433 }
5434
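/* The loop above reads fixed-size (a_type, a_un.a_val) records until
   both AT_PHDR and AT_PHNUM have been seen.  A sketch of the same
   pattern for a single tag, AT_ENTRY, assuming an ELF64 inferior.
   Hypothetical helper; returns 0 if the tag is absent.  */

static unsigned long
read_auxv_entry_sketch (int pid)
{
  char filename[PATH_MAX];
  Elf64_auxv_t aux;
  unsigned long value = 0;
  int fd;

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;

  while (read (fd, &aux, sizeof aux) == sizeof aux && aux.a_type != AT_NULL)
    if (aux.a_type == AT_ENTRY)
      {
	value = aux.a_un.a_val;
	break;
      }

  close (fd);
  return value;
}
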
5435 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5436
5437 static CORE_ADDR
5438 get_dynamic (const int pid, const int is_elf64)
5439 {
5440 CORE_ADDR phdr_memaddr, relocation;
5441 int num_phdr, i;
5442 unsigned char *phdr_buf;
5443 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5444
5445 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5446 return 0;
5447
5448 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5449 phdr_buf = alloca (num_phdr * phdr_size);
5450
5451 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5452 return 0;
5453
5454 /* Compute relocation: it is expected to be 0 for "regular" executables,
5455 non-zero for PIE ones. */
5456 relocation = -1;
5457 for (i = 0; relocation == -1 && i < num_phdr; i++)
5458 if (is_elf64)
5459 {
5460 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5461
5462 if (p->p_type == PT_PHDR)
5463 relocation = phdr_memaddr - p->p_vaddr;
5464 }
5465 else
5466 {
5467 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5468
5469 if (p->p_type == PT_PHDR)
5470 relocation = phdr_memaddr - p->p_vaddr;
5471 }
5472
5473 if (relocation == -1)
5474 {
5475 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5476 all real-world executables, including PIE executables, always have
5477 PT_PHDR present. PT_PHDR is missing from some shared libraries and
5478 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5479 provides DT_DEBUG anyway (fpc binaries are statically linked).
5480
5481 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5482
5483 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5484
5485 return 0;
5486 }
5487
5488 for (i = 0; i < num_phdr; i++)
5489 {
5490 if (is_elf64)
5491 {
5492 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5493
5494 if (p->p_type == PT_DYNAMIC)
5495 return p->p_vaddr + relocation;
5496 }
5497 else
5498 {
5499 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5500
5501 if (p->p_type == PT_DYNAMIC)
5502 return p->p_vaddr + relocation;
5503 }
5504 }
5505
5506 return 0;
5507 }
5508
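/* Worked example of the relocation computed above, with hypothetical
   numbers: if the auxv reports the program headers at 0x555555554040
   and the PT_PHDR entry records p_vaddr == 0x40, then relocation ==
   0x555555554000 - the PIE load bias.  A PT_DYNAMIC entry with
   p_vaddr == 0x2e00 therefore puts _DYNAMIC at 0x555555556e00.  For
   a non-PIE executable the two addresses match and the relocation is
   0.  */
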
5509 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5510 can be 0 if the inferior does not yet have the library list initialized.
5511 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5512 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5513
5514 static CORE_ADDR
5515 get_r_debug (const int pid, const int is_elf64)
5516 {
5517 CORE_ADDR dynamic_memaddr;
5518 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5519 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5520 CORE_ADDR map = -1;
5521
5522 dynamic_memaddr = get_dynamic (pid, is_elf64);
5523 if (dynamic_memaddr == 0)
5524 return map;
5525
5526 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5527 {
5528 if (is_elf64)
5529 {
5530 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5531 #ifdef DT_MIPS_RLD_MAP
5532 union
5533 {
5534 Elf64_Xword map;
5535 unsigned char buf[sizeof (Elf64_Xword)];
5536 }
5537 rld_map;
5538
5539 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5540 {
5541 if (linux_read_memory (dyn->d_un.d_val,
5542 rld_map.buf, sizeof (rld_map.buf)) == 0)
5543 return rld_map.map;
5544 else
5545 break;
5546 }
5547 #endif /* DT_MIPS_RLD_MAP */
5548
5549 if (dyn->d_tag == DT_DEBUG && map == -1)
5550 map = dyn->d_un.d_val;
5551
5552 if (dyn->d_tag == DT_NULL)
5553 break;
5554 }
5555 else
5556 {
5557 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5558 #ifdef DT_MIPS_RLD_MAP
5559 union
5560 {
5561 Elf32_Word map;
5562 unsigned char buf[sizeof (Elf32_Word)];
5563 }
5564 rld_map;
5565
5566 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5567 {
5568 if (linux_read_memory (dyn->d_un.d_val,
5569 rld_map.buf, sizeof (rld_map.buf)) == 0)
5570 return rld_map.map;
5571 else
5572 break;
5573 }
5574 #endif /* DT_MIPS_RLD_MAP */
5575
5576 if (dyn->d_tag == DT_DEBUG && map == -1)
5577 map = dyn->d_un.d_val;
5578
5579 if (dyn->d_tag == DT_NULL)
5580 break;
5581 }
5582
5583 dynamic_memaddr += dyn_size;
5584 }
5585
5586 return map;
5587 }
5588
5589 /* Read one pointer from MEMADDR in the inferior. */
5590
5591 static int
5592 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5593 {
5594 int ret;
5595
5596 /* Go through a union so this works on either big- or little-endian
5597 hosts, when the inferior's pointer size is smaller than the size
5598 of CORE_ADDR. It is assumed the inferior's endianness is the
5599 same as the superior's. */
5600 union
5601 {
5602 CORE_ADDR core_addr;
5603 unsigned int ui;
5604 unsigned char uc;
5605 } addr;
5606
5607 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5608 if (ret == 0)
5609 {
5610 if (ptr_size == sizeof (CORE_ADDR))
5611 *ptr = addr.core_addr;
5612 else if (ptr_size == sizeof (unsigned int))
5613 *ptr = addr.ui;
5614 else
5615 gdb_assert_not_reached ("unhandled pointer size");
5616 }
5617 return ret;
5618 }
5619
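/* Usage sketch for read_one_ptr (hypothetical helper): follow the
   l_next chain of a 32-bit inferior from a 64-bit gdbserver by
   passing ptr_size == 4.  The union above works on both
   endiannesses because the bytes land in the member whose size
   matches ptr_size, not in the wider CORE_ADDR directly.  Returns 0
   at the end of the chain or on error.  */

static CORE_ADDR
next_link_map_sketch (CORE_ADDR lm_addr, int l_next_offset)
{
  CORE_ADDR l_next = 0;

  if (read_one_ptr (lm_addr + l_next_offset, &l_next, 4) != 0)
    return 0;
  return l_next;
}
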
5620 struct link_map_offsets
5621 {
5622 /* Offset and size of r_debug.r_version. */
5623 int r_version_offset;
5624
5625 /* Offset and size of r_debug.r_map. */
5626 int r_map_offset;
5627
5628 /* Offset to l_addr field in struct link_map. */
5629 int l_addr_offset;
5630
5631 /* Offset to l_name field in struct link_map. */
5632 int l_name_offset;
5633
5634 /* Offset to l_ld field in struct link_map. */
5635 int l_ld_offset;
5636
5637 /* Offset to l_next field in struct link_map. */
5638 int l_next_offset;
5639
5640 /* Offset to l_prev field in struct link_map. */
5641 int l_prev_offset;
5642 };
5643
5644 /* Construct qXfer:libraries-svr4:read reply. */
5645
5646 static int
5647 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5648 unsigned const char *writebuf,
5649 CORE_ADDR offset, int len)
5650 {
5651 char *document;
5652 unsigned document_len;
5653 struct process_info_private *const priv = current_process ()->private;
5654 char filename[PATH_MAX];
5655 int pid, is_elf64;
5656
5657 static const struct link_map_offsets lmo_32bit_offsets =
5658 {
5659 0, /* r_version offset. */
5660 4, /* r_debug.r_map offset. */
5661 0, /* l_addr offset in link_map. */
5662 4, /* l_name offset in link_map. */
5663 8, /* l_ld offset in link_map. */
5664 12, /* l_next offset in link_map. */
5665 16 /* l_prev offset in link_map. */
5666 };
5667
5668 static const struct link_map_offsets lmo_64bit_offsets =
5669 {
5670 0, /* r_version offset. */
5671 8, /* r_debug.r_map offset. */
5672 0, /* l_addr offset in link_map. */
5673 8, /* l_name offset in link_map. */
5674 16, /* l_ld offset in link_map. */
5675 24, /* l_next offset in link_map. */
5676 32 /* l_prev offset in link_map. */
5677 };
5678 const struct link_map_offsets *lmo;
5679 unsigned int machine;
5680
5681 if (writebuf != NULL)
5682 return -2;
5683 if (readbuf == NULL)
5684 return -1;
5685
5686 pid = lwpid_of (get_thread_lwp (current_inferior));
5687 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5688 is_elf64 = elf_64_file_p (filename, &machine);
5689 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5690
5691 if (priv->r_debug == 0)
5692 priv->r_debug = get_r_debug (pid, is_elf64);
5693
5694 /* We failed to find DT_DEBUG. This situation will not change for this
5695 inferior, so do not retry it. Report it to GDB as E01; see GDB's
5696 solib-svr4.c for the reasons. */
5697 if (priv->r_debug == (CORE_ADDR) -1)
5698 return -1;
5699
5700 if (priv->r_debug == 0)
5701 {
5702 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5703 }
5704 else
5705 {
5706 int allocated = 1024;
5707 char *p;
5708 const int ptr_size = is_elf64 ? 8 : 4;
5709 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5710 int r_version, header_done = 0;
5711
5712 document = xmalloc (allocated);
5713 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5714 p = document + strlen (document);
5715
5716 r_version = 0;
5717 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5718 (unsigned char *) &r_version,
5719 sizeof (r_version)) != 0
5720 || r_version != 1)
5721 {
5722 warning ("unexpected r_debug version %d", r_version);
5723 goto done;
5724 }
5725
5726 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5727 &lm_addr, ptr_size) != 0)
5728 {
5729 warning ("unable to read r_map from 0x%lx",
5730 (long) priv->r_debug + lmo->r_map_offset);
5731 goto done;
5732 }
5733
5734 lm_prev = 0;
5735 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5736 &l_name, ptr_size) == 0
5737 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5738 &l_addr, ptr_size) == 0
5739 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5740 &l_ld, ptr_size) == 0
5741 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5742 &l_prev, ptr_size) == 0
5743 && read_one_ptr (lm_addr + lmo->l_next_offset,
5744 &l_next, ptr_size) == 0)
5745 {
5746 unsigned char libname[PATH_MAX];
5747
5748 if (lm_prev != l_prev)
5749 {
5750 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5751 (long) lm_prev, (long) l_prev);
5752 break;
5753 }
5754
5755 /* Not checking for error because reading may stop before
5756 we've got PATH_MAX worth of characters. */
5757 libname[0] = '\0';
5758 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5759 libname[sizeof (libname) - 1] = '\0';
5760 if (libname[0] != '\0')
5761 {
5762 /* 6x the size for xml_escape_text below. */
5763 size_t len = 6 * strlen ((char *) libname);
5764 char *name;
5765
5766 if (!header_done)
5767 {
5768 /* Terminate `<library-list-svr4'. */
5769 *p++ = '>';
5770 header_done = 1;
5771 }
5772
5773 while (allocated < p - document + len + 200)
5774 {
5775 /* Expand to guarantee sufficient storage. */
5776 uintptr_t document_len = p - document;
5777
5778 document = xrealloc (document, 2 * allocated);
5779 allocated *= 2;
5780 p = document + document_len;
5781 }
5782
5783 name = xml_escape_text ((char *) libname);
5784 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5785 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5786 name, (unsigned long) lm_addr,
5787 (unsigned long) l_addr, (unsigned long) l_ld);
5788 free (name);
5789 }
5790 else if (lm_prev == 0)
5791 {
5792 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5793 p = p + strlen (p);
5794 }
5795
5796 if (l_next == 0)
5797 break;
5798
5799 lm_prev = lm_addr;
5800 lm_addr = l_next;
5801 }
5802 done:
5803 if (!header_done)
5804 {
5805 /* Empty list; terminate `<library-list-svr4'. */
5806 strcpy (p, "/>");
5807 }
5808 else
5809 strcpy (p, "</library-list-svr4>");
5810 }
5811
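  /* Serve the requested window of the document: a qXfer read asks for
     LEN bytes starting at OFFSET, so clamp both to what remains.  */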
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
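
/* For illustration only, a document produced by the function above
   might look like this (a sketch; the addresses are invented):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffd000">
       <library name="/lib/libc.so.6" lm="0x7ffff7fd9000"
		l_addr="0x7ffff7a00000" l_ld="0x7ffff7bcd000"/>
     </library-list-svr4>  */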

#ifdef HAVE_LINUX_BTRACE

/* Enable branch tracing.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);
  if (tinfo != NULL)
    tinfo->ptr_bits = register_size (0) * 8;

  return tinfo;
}

/* Read branch trace data as a btrace XML document.  */

static void
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
                       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  int i;

  btrace = linux_read_btrace (tinfo, type);

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
                       paddress (block->begin), paddress (block->end));

  buffer_grow_str (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);
}
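
/* For illustration only, the buffer filled in above might contain
   (a sketch; the block addresses are invented):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400500" end="0x400524"/>
     <block begin="0x400530" end="0x40054c"/>
     </btrace>  */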
#endif /* HAVE_LINUX_BTRACE */

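/* The Linux implementation of the target operations vector.  NULL
   entries (some conditional on the configuration) mark operations
   this target does not support.  */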
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
  linux_ptrace_init_warnings ();
#ifdef HAVE_LINUX_REGSETS
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}