/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <stdio.h>
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

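/* As a quick illustration of the fallback above (not from the original
   sources): SIGTRAP is 5 on Linux, so W_STOPCODE (SIGTRAP) is 0x057f,
   and the standard wait macros decode such a synthesized status again:

     int status = W_STOPCODE (SIGTRAP);
     gdb_assert (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP);  */
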
/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN has defined these since at least the 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

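/* These typedefs mirror the (type, value) pairs the kernel exposes in
   /proc/PID/auxv; for instance, an Elf64_auxv_t entry whose a_type is
   AT_PHDR carries the address of the inferior's program headers in
   a_un.a_val.  */
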
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

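/* A sketch of how the two functions above pair up in this file (PID
   and status values hypothetical): linux_low_filter_event stashes a
   stop from a not-yet-known LWP, and handle_extended_wait later
   claims it when the corresponding clone event arrives:

     add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
     ...
     int status;
     if (pull_pid_from_list (&stopped_pids, 1234, &status))
       gdb_assert (WSTOPSIG (status) == SIGSTOP);  */
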
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

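/* Note that lwp_in_step_range treats the range as half-open: a PC
   equal to step_range_end already counts as outside the stepping
   range, matching GDB's usual [start, end) range convention.  */
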
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  The ELF machine is
   stored in *MACHINE (EM_NONE if HEADER is not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

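/* A hypothetical usage sketch: a caller choosing between 32-bit and
   64-bit register layouts for an inferior could do

     unsigned int machine;
     int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

   where 1 means 64-bit, 0 means not a 64-bit ELF, -1 means the exe
   could not be examined, and MACHINE (e.g. EM_X86_64) identifies the
   architecture.  */
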
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

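/* For reference, a sketch of the status layout this relies on: a
   ptrace extended event is reported by waitpid as a SIGTRAP stop with
   the event code in the high 16 bits of the status, i.e. (STATUS
   value hypothetical):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
	 && (status >> 16) == PTRACE_EVENT_CLONE)
       ... handle the clone ...  */
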
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

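/* Concretely (addresses hypothetical): on i386 the breakpoint
   instruction is the one-byte int3 and decr_pc_after_break is 1, so
   after hitting a breakpoint planted at 0x8048500 the kernel reports
   $eip == 0x8048501, and get_stop_pc adjusts that back to
   0x8048500.  */
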
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);

  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
				strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
				strerror (err), err);
  xfree (warnings);
  return reason_string;
}

/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  int new_threads_found;
	  int iterations = 0;

	  while (iterations < 2)
	    {
	      struct dirent *dp;

	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  unsigned long lwp;
		  ptid_t ptid;

		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  ptid = ptid_build (pid, lwp, 0);

		  /* Is this a new thread?  */
		  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
		    {
		      int err;

		      if (debug_threads)
			debug_printf ("Found new lwp %ld\n", lwp);

		      err = linux_attach_lwp (ptid);
		      if (err != 0)
			warning ("Cannot attach to lwp %ld: %s",
				 lwp,
				 linux_attach_fail_reason_string (ptid, err));

		      new_threads_found++;
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

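/* A note on the find_inferior idiom above: the callback returns
   nonzero to stop the walk, so second_thread_of_pid_p makes
   find_inferior return the second thread of PID it encounters, and a
   NULL result therefore means PID has at most one thread left.  */
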
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    debug_printf ("LKL:  kill (SIGKILL) %s, 0, 0 (%s)\n",
		  target_pid_to_str (ptid_of (thr)),
		  errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
		  target_pid_to_str (ptid_of (thr)),
		  errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (thread->entry.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    {
      struct thread_info *thr = get_lwp_thread (lwp);

      if (debug_threads)
	debug_printf ("lk_1: killing lwp %ld, for pid: %d\n",
		      lwpid_of (thr), pid);

      do
	{
	  linux_kill_one_lwp (lwp);

	  /* Make sure it died.  The loop is most likely unnecessary.  */
	  lwpid = linux_wait_for_event (thr->entry.id, &wstat, __WALL);
	} while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

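/* Note that same_lwp lets callers pass either a full (pid, lwp, 0)
   ptid or a bare pid_to_ptid (pid): when the lwp field is zero, the
   pid doubles as the lwpid, which is how e.g. linux_kill looks up the
   thread group leader with find_lwp_pid (pid_to_ptid (pid)).  */
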
/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_inferior));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_inferior));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_inferior),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_inferior, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_inferior));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice.)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

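/* Note the queue discipline here: enqueue_one_deferred_signal pushes
   the newest entry at the head of the PREV-linked list, while
   dequeue_one_deferred_signal walks to the tail, so deferred signals
   are reported in the order they originally arrived (FIFO).  */
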
1675 /* Arrange for a breakpoint to be hit again later. We don't keep the
1676 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1677 will handle the current event, eventually we will resume this LWP,
1678 and this breakpoint will trap again. */
1679
1680 static int
1681 cancel_breakpoint (struct lwp_info *lwp)
1682 {
1683 struct thread_info *saved_inferior;
1684
1685 /* There's nothing to do if we don't support breakpoints. */
1686 if (!supports_breakpoints ())
1687 return 0;
1688
1689 /* breakpoint_at reads from current inferior. */
1690 saved_inferior = current_inferior;
1691 current_inferior = get_lwp_thread (lwp);
1692
1693 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1694 {
1695 if (debug_threads)
1696 debug_printf ("CB: Push back breakpoint for %s\n",
1697 target_pid_to_str (ptid_of (current_inferior)));
1698
1699 /* Back up the PC if necessary. */
1700 if (the_low_target.decr_pc_after_break)
1701 {
1702 struct regcache *regcache
1703 = get_thread_regcache (current_inferior, 1);
1704 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1705 }
1706
1707 current_inferior = saved_inferior;
1708 return 1;
1709 }
1710 else
1711 {
1712 if (debug_threads)
1713 debug_printf ("CB: No breakpoint found at %s for [%s]\n",
1714 paddress (lwp->stop_pc),
1715 target_pid_to_str (ptid_of (current_inferior)));
1716 }
1717
1718 current_inferior = saved_inferior;
1719 return 0;
1720 }
1721
1722 /* Do low-level handling of the event, and check if we should go on
1723 and pass it to caller code. Return the affected lwp if we are, or
1724 NULL otherwise. */
1725
1726 static struct lwp_info *
1727 linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1728 {
1729 struct lwp_info *child;
1730 struct thread_info *thread;
1731
1732 child = find_lwp_pid (pid_to_ptid (lwpid));
1733
1734 /* If we didn't find a process, one of two things presumably happened:
1735 - A process we started and then detached from has exited. Ignore it.
1736 - A process we are controlling has forked and the new child's stop
1737 was reported to us by the kernel. Save its PID. */
1738 if (child == NULL && WIFSTOPPED (wstat))
1739 {
1740 add_to_pid_list (&stopped_pids, lwpid, wstat);
1741 return NULL;
1742 }
1743 else if (child == NULL)
1744 return NULL;
1745
1746 thread = get_lwp_thread (child);
1747
1748 child->stopped = 1;
1749
1750 child->last_status = wstat;
1751
1752 if (WIFSTOPPED (wstat))
1753 {
1754 struct process_info *proc;
1755
1756 /* Architecture-specific setup after inferior is running. This
1757 needs to happen after we have attached to the inferior and it
1758 is stopped for the first time, but before we access any
1759 inferior registers. */
1760 proc = find_process_pid (pid_of (thread));
1761 if (proc->private->new_inferior)
1762 {
1763 struct thread_info *saved_inferior;
1764
1765 saved_inferior = current_inferior;
1766 current_inferior = thread;
1767
1768 the_low_target.arch_setup ();
1769
1770 current_inferior = saved_inferior;
1771
1772 proc->private->new_inferior = 0;
1773 }
1774 }
1775
1776 /* Store the STOP_PC, with adjustment applied. This depends on the
1777 architecture being defined already (so that CHILD has a valid
1778 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1779 not). */
1780 if (WIFSTOPPED (wstat))
1781 {
1782 if (debug_threads
1783 && the_low_target.get_pc != NULL)
1784 {
1785 struct thread_info *saved_inferior;
1786 struct regcache *regcache;
1787 CORE_ADDR pc;
1788
1789 saved_inferior = current_inferior;
1790 current_inferior = thread;
1791 regcache = get_thread_regcache (current_inferior, 1);
1792 pc = (*the_low_target.get_pc) (regcache);
1793 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
1794 current_inferior = saved_inferior;
1795 }
1796
1797 child->stop_pc = get_stop_pc (child);
1798 }
1799
1800 /* Fetch the possibly triggered data watchpoint info and store it in
1801 CHILD.
1802
1803 On some archs, like x86, that use debug registers to set
1804 watchpoints, it's possible that the way to know which watched
1805 address trapped, is to check the register that is used to select
1806 which address to watch. Problem is, between setting the
1807 watchpoint and reading back which data address trapped, the user
1808 may change the set of watchpoints, and, as a consequence, GDB
1809 changes the debug registers in the inferior. To avoid reading
1810 back a stale stopped-data-address when that happens, we cache in
1811 LP the fact that a watchpoint trapped, and the corresponding data
1812 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1813 changes the debug registers meanwhile, we have the cached data we
1814 can rely on. */
1815
1816 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1817 {
1818 if (the_low_target.stopped_by_watchpoint == NULL)
1819 {
1820 child->stopped_by_watchpoint = 0;
1821 }
1822 else
1823 {
1824 struct thread_info *saved_inferior;
1825
1826 saved_inferior = current_inferior;
1827 current_inferior = thread;
1828
1829 child->stopped_by_watchpoint
1830 = the_low_target.stopped_by_watchpoint ();
1831
1832 if (child->stopped_by_watchpoint)
1833 {
1834 if (the_low_target.stopped_data_address != NULL)
1835 child->stopped_data_address
1836 = the_low_target.stopped_data_address ();
1837 else
1838 child->stopped_data_address = 0;
1839 }
1840
1841 current_inferior = saved_inferior;
1842 }
1843 }
1844
1845 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1846 {
1847 linux_enable_event_reporting (lwpid);
1848 child->must_set_ptrace_flags = 0;
1849 }
1850
1851 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1852 && wstat >> 16 != 0)
1853 {
1854 handle_extended_wait (child, wstat);
1855 return NULL;
1856 }
1857
1858 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1859 && child->stop_expected)
1860 {
1861 if (debug_threads)
1862 debug_printf ("Expected stop.\n");
1863 child->stop_expected = 0;
1864
1865 if (thread->last_resume_kind == resume_stop)
1866 {
1867 /* We want to report the stop to the core. Treat the
1868 SIGSTOP as a normal event. */
1869 }
1870 else if (stopping_threads != NOT_STOPPING_THREADS)
1871 {
1872 /* Stopping threads. We don't want this SIGSTOP to end up
1873 pending in the FILTER_PTID handling below. */
1874 return NULL;
1875 }
1876 else
1877 {
1878 /* Filter out the event. */
1879 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1880 return NULL;
1881 }
1882 }
1883
1884 /* Check if the thread has exited. */
1885 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1886 && num_lwps (pid_of (thread)) > 1)
1887 {
1888 if (debug_threads)
1889 debug_printf ("LLW: %d exited.\n", lwpid);
1890
1891 /* If there is at least one more LWP, then the exit signal
1892 was not the end of the debugged application and should be
1893 ignored. */
1894 delete_lwp (child);
1895 return NULL;
1896 }
1897
1898 if (!ptid_match (ptid_of (thread), filter_ptid))
1899 {
1900 if (debug_threads)
1901 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1902 lwpid, wstat);
1903
1904 if (WIFSTOPPED (wstat))
1905 {
1906 child->status_pending_p = 1;
1907 child->status_pending = wstat;
1908
1909 if (WSTOPSIG (wstat) != SIGSTOP)
1910 {
1911 /* Cancel breakpoint hits. The breakpoint may be
1912 removed before we fetch events from this process to
1913 report to the core. It is best not to assume the
1914 moribund breakpoints heuristic always handles these
1915 cases --- it could be too many events go through to
1916 the core before this one is handled. All-stop always
1917 cancels breakpoint hits in all threads. */
1918 if (non_stop
1919 && WSTOPSIG (wstat) == SIGTRAP
1920 && cancel_breakpoint (child))
1921 {
1922 /* Throw away the SIGTRAP. */
1923 child->status_pending_p = 0;
1924
1925 if (debug_threads)
1926 debug_printf ("LLW: LWP %d hit a breakpoint while"
1927 " waiting for another process;"
1928 " cancelled it\n", lwpid);
1929 }
1930 }
1931 }
1932 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1933 {
1934 if (debug_threads)
1935 debug_printf ("LLWE: process %d exited while fetching "
1936 "event from another LWP\n", lwpid);
1937
1938 /* This was the last lwp in the process. Since events are
1939 serialized to GDB core, we can't report this one right
1940 now. GDB core and the other target layers will want to
1941 be notified about the exit code/signal, so leave the
1942 status pending for the next time we're able to report
1943 it. */
1944 mark_lwp_dead (child, wstat);
1945 }
1946
1947 return NULL;
1948 }
1949
1950 return child;
1951 }
1952
1953 /* When the event-loop is doing a step-over, this points at the thread
1954 being stepped. */
1955 ptid_t step_over_bkpt;
1956
1957 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1958 match FILTER_PTID (leaving others pending). The PTIDs can be:
1959 minus_one_ptid, to specify any child; a pid PTID, specifying all
1960 lwps of a thread group; or a PTID representing a single lwp. Store
1961 the stop status through the status pointer WSTATP. OPTIONS is
1962 passed to the waitpid call. Return 0 if no event was found and
1963 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1964 were found. Return the PID of the stopped child otherwise. */
1965
1966 static int
1967 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1968 int *wstatp, int options)
1969 {
1970 struct thread_info *event_thread;
1971 struct lwp_info *event_child, *requested_child;
1972 sigset_t block_mask, prev_mask;
1973
1974 retry:
1975 /* N.B. event_thread points to the thread_info struct that contains
1976 event_child. Keep them in sync. */
1977 event_thread = NULL;
1978 event_child = NULL;
1979 requested_child = NULL;
1980
1981 /* Check for a lwp with a pending status. */
1982
1983 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
1984 {
1985 event_thread = (struct thread_info *)
1986 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
1987 if (event_thread != NULL)
1988 event_child = get_thread_lwp (event_thread);
1989 if (debug_threads && event_thread)
1990 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
1991 }
1992 else if (!ptid_equal (filter_ptid, null_ptid))
1993 {
1994 requested_child = find_lwp_pid (filter_ptid);
1995
1996 if (stopping_threads == NOT_STOPPING_THREADS
1997 && requested_child->status_pending_p
1998 && requested_child->collecting_fast_tracepoint)
1999 {
2000 enqueue_one_deferred_signal (requested_child,
2001 &requested_child->status_pending);
2002 requested_child->status_pending_p = 0;
2003 requested_child->status_pending = 0;
2004 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2005 }
2006
2007 if (requested_child->suspended
2008 && requested_child->status_pending_p)
2009 fatal ("requesting an event out of a suspended child?");
2010
2011 if (requested_child->status_pending_p)
2012 {
2013 event_child = requested_child;
2014 event_thread = get_lwp_thread (event_child);
2015 }
2016 }
2017
2018 if (event_child != NULL)
2019 {
2020 if (debug_threads)
2021 debug_printf ("Got an event from pending child %ld (%04x)\n",
2022 lwpid_of (event_thread), event_child->status_pending);
2023 *wstatp = event_child->status_pending;
2024 event_child->status_pending_p = 0;
2025 event_child->status_pending = 0;
2026 current_inferior = event_thread;
2027 return lwpid_of (event_thread);
2028 }
2029
2030 /* But if we don't find a pending event, we'll have to wait.
2031
2032 We only enter this loop if no process has a pending wait status.
2033 Thus any action taken in response to a wait status inside this
2034 loop is responding as soon as we detect the status, not after any
2035 pending events. */
2036
2037 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2038 all signals while here. */
2039 sigfillset (&block_mask);
2040 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2041
2042 while (event_child == NULL)
2043 {
2044 pid_t ret = 0;
2045
2046 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2047 quirks:
2048
2049 - If the thread group leader exits while other threads in the
2050 thread group still exist, waitpid(TGID, ...) hangs. That
2051 waitpid won't return an exit status until the other threads
2052 in the group are reaped.
2053
2054 - When a non-leader thread execs, that thread just vanishes
2055 without reporting an exit (so we'd hang if we waited for it
2056 explicitly in that case). The exec event is reported to
2057 the TGID pid (although we don't currently enable exec
2058 events). */
2059 errno = 0;
2060 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2061
2062 if (debug_threads)
2063 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2064 ret, errno ? strerror (errno) : "ERRNO-OK");
2065
2066 if (ret > 0)
2067 {
2068 if (debug_threads)
2069 {
2070 debug_printf ("LLW: waitpid %ld received %s\n",
2071 (long) ret, status_to_str (*wstatp));
2072 }
2073
2074 event_child = linux_low_filter_event (filter_ptid,
2075 ret, *wstatp);
2076 if (event_child != NULL)
2077 {
2078 /* We got an event to report to the core. */
2079 event_thread = get_lwp_thread (event_child);
2080 break;
2081 }
2082
2083 /* Retry until nothing comes out of waitpid. A single
2084 SIGCHLD can indicate more than one child stopped. */
2085 continue;
2086 }
2087
2088 /* Check for zombie thread group leaders. Those can't be reaped
2089 until all other threads in the thread group are. */
2090 check_zombie_leaders ();
2091
2092 /* If there are no resumed children left in the set of LWPs we
2093 want to wait for, bail. We can't just block in
2094 waitpid/sigsuspend, because lwps might have been left stopped
2095 in trace-stop state, and we'd be stuck forever waiting for
2096 their status to change (which would only happen if we resumed
2097 them). Even if WNOHANG is set, this return code is preferred
2098 over 0 (below), as it is more detailed. */
2099 if ((find_inferior (&all_threads,
2100 not_stopped_callback,
2101 &wait_ptid) == NULL))
2102 {
2103 if (debug_threads)
2104 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2105 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2106 return -1;
2107 }
2108
2109 /* No interesting event to report to the caller. */
2110 if ((options & WNOHANG))
2111 {
2112 if (debug_threads)
2113 debug_printf ("WNOHANG set, no event found\n");
2114
2115 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2116 return 0;
2117 }
2118
2119 /* Block until we get an event reported with SIGCHLD. */
2120 if (debug_threads)
2121 debug_printf ("sigsuspend'ing\n");
2122
2123 sigsuspend (&prev_mask);
2124 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2125 goto retry;
2126 }
2127
2128 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2129
2130 current_inferior = event_thread;
2131
2132 /* Check for thread exit. */
2133 if (! WIFSTOPPED (*wstatp))
2134 {
2135 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2136
2137 if (debug_threads)
2138 debug_printf ("LWP %d is the last lwp of process. "
2139 "Process %ld exiting.\n",
2140 pid_of (event_thread), lwpid_of (event_thread));
2141 return lwpid_of (event_thread);
2142 }
2143
2144 return lwpid_of (event_thread);
2145 }
2146
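/* The wait loop above relies on the classic block/check/sigsuspend
   pattern to avoid losing a SIGCHLD that arrives between the WNOHANG
   poll and going to sleep.  A minimal self-contained sketch of the
   same idea (illustrative only; error handling omitted):

     sigset_t block_mask, prev_mask;
     int status;
     pid_t ret;

     sigfillset (&block_mask);
     sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
     for (;;)
       {
         ret = waitpid (-1, &status, WNOHANG | __WALL);
         if (ret > 0)
           break;

         sigsuspend (&prev_mask);
       }
     sigprocmask (SIG_SETMASK, &prev_mask, NULL);

   sigsuspend atomically restores the old mask and sleeps, so a
   SIGCHLD delivered after the poll wakes it up immediately instead of
   being lost.  */
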
2147 /* Wait for an event from child(ren) PTID. PTIDs can be:
2148 minus_one_ptid, to specify any child; a pid PTID, specifying all
2149 lwps of a thread group; or a PTID representing a single lwp. Store
2150 the stop status through the status pointer WSTATP. OPTIONS is
2151 passed to the waitpid call. Return 0 if no event was found and
2152 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2153 were found. Return the PID of the stopped child otherwise. */
2154
2155 static int
2156 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2157 {
2158 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2159 }
2160
2161 /* Count the LWP's that have had events. */
2162
2163 static int
2164 count_events_callback (struct inferior_list_entry *entry, void *data)
2165 {
2166 struct thread_info *thread = (struct thread_info *) entry;
2167 struct lwp_info *lp = get_thread_lwp (thread);
2168 int *count = data;
2169
2170 gdb_assert (count != NULL);
2171
2172 /* Count only resumed LWPs that have a SIGTRAP event pending that
2173 should be reported to GDB. */
2174 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2175 && thread->last_resume_kind != resume_stop
2176 && lp->status_pending_p
2177 && WIFSTOPPED (lp->status_pending)
2178 && WSTOPSIG (lp->status_pending) == SIGTRAP
2179 && !breakpoint_inserted_here (lp->stop_pc))
2180 (*count)++;
2181
2182 return 0;
2183 }
2184
2185 /* Select the LWP (if any) that is currently being single-stepped. */
2186
2187 static int
2188 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2189 {
2190 struct thread_info *thread = (struct thread_info *) entry;
2191 struct lwp_info *lp = get_thread_lwp (thread);
2192
2193 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2194 && thread->last_resume_kind == resume_step
2195 && lp->status_pending_p)
2196 return 1;
2197 else
2198 return 0;
2199 }
2200
2201 /* Select the Nth LWP that has had a SIGTRAP event that should be
2202 reported to GDB. */
2203
2204 static int
2205 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2206 {
2207 struct thread_info *thread = (struct thread_info *) entry;
2208 struct lwp_info *lp = get_thread_lwp (thread);
2209 int *selector = data;
2210
2211 gdb_assert (selector != NULL);
2212
2213 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2214 if (thread->last_resume_kind != resume_stop
2215 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2216 && lp->status_pending_p
2217 && WIFSTOPPED (lp->status_pending)
2218 && WSTOPSIG (lp->status_pending) == SIGTRAP
2219 && !breakpoint_inserted_here (lp->stop_pc))
2220 if ((*selector)-- == 0)
2221 return 1;
2222
2223 return 0;
2224 }
2225
2226 static int
2227 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2228 {
2229 struct thread_info *thread = (struct thread_info *) entry;
2230 struct lwp_info *lp = get_thread_lwp (thread);
2231 struct lwp_info *event_lp = data;
2232
2233 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2234 if (lp == event_lp)
2235 return 0;
2236
2237 /* If a LWP other than the LWP that we're reporting an event for has
2238 hit a GDB breakpoint (as opposed to some random trap signal),
2239 then just arrange for it to hit it again later. We don't keep
2240 the SIGTRAP status and don't forward the SIGTRAP signal to the
2241 LWP. We will handle the current event, eventually we will resume
2242 all LWPs, and this one will get its breakpoint trap again.
2243
2244 If we do not do this, then we run the risk that the user will
2245 delete or disable the breakpoint, but the LWP will have already
2246 tripped on it. */
2247
2248 if (thread->last_resume_kind != resume_stop
2249 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2250 && lp->status_pending_p
2251 && WIFSTOPPED (lp->status_pending)
2252 && WSTOPSIG (lp->status_pending) == SIGTRAP
2253 && !lp->stepping
2254 && !lp->stopped_by_watchpoint
2255 && cancel_breakpoint (lp))
2256 /* Throw away the SIGTRAP. */
2257 lp->status_pending_p = 0;
2258
2259 return 0;
2260 }
2261
2262 static void
2263 linux_cancel_breakpoints (void)
2264 {
2265 find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
2266 }
2267
2268 /* Select one LWP out of those that have events pending. */
2269
2270 static void
2271 select_event_lwp (struct lwp_info **orig_lp)
2272 {
2273 int num_events = 0;
2274 int random_selector;
2275 struct thread_info *event_thread;
2276
2277 /* Give preference to any LWP that is being single-stepped. */
2278 event_thread
2279 = (struct thread_info *) find_inferior (&all_threads,
2280 select_singlestep_lwp_callback,
2281 NULL);
2282 if (event_thread != NULL)
2283 {
2284 if (debug_threads)
2285 debug_printf ("SEL: Select single-step %s\n",
2286 target_pid_to_str (ptid_of (event_thread)));
2287 }
2288 else
2289 {
2290 /* No single-stepping LWP. Select one at random, out of those
2291 which have had SIGTRAP events. */
2292
2293 /* First see how many SIGTRAP events we have. */
2294 find_inferior (&all_threads, count_events_callback, &num_events);
2295
2296 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2297 random_selector = (int)
2298 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2299
2300 if (debug_threads && num_events > 1)
2301 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2302 num_events, random_selector);
2303
2304 event_thread
2305 = (struct thread_info *) find_inferior (&all_threads,
2306 select_event_lwp_callback,
2307 &random_selector);
2308 }
2309
2310 if (event_thread != NULL)
2311 {
2312 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2313
2314 /* Switch the event LWP. */
2315 *orig_lp = event_lp;
2316 }
2317 }
2318
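/* The random pick above maps rand () onto [0, num_events) without
   modulo bias: (num_events * (double) rand ()) / (RAND_MAX + 1.0)
   scales rand ()'s range of [0, RAND_MAX] down to [0, num_events),
   and truncating to int yields each index with (nearly) equal
   probability.  E.g., with num_events == 3 and RAND_MAX == 32767,
   rand () values 0..10922 select event #0, 10923..21845 select #1,
   and 21846..32767 select #2.  */
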
2319 /* Decrement the suspend count of an LWP. */
2320
2321 static int
2322 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2323 {
2324 struct thread_info *thread = (struct thread_info *) entry;
2325 struct lwp_info *lwp = get_thread_lwp (thread);
2326
2327 /* Ignore EXCEPT. */
2328 if (lwp == except)
2329 return 0;
2330
2331 lwp->suspended--;
2332
2333 gdb_assert (lwp->suspended >= 0);
2334 return 0;
2335 }
2336
2337 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2338 non-NULL. */
2339
2340 static void
2341 unsuspend_all_lwps (struct lwp_info *except)
2342 {
2343 find_inferior (&all_threads, unsuspend_one_lwp, except);
2344 }
2345
2346 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2347 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2348 void *data);
2349 static int lwp_running (struct inferior_list_entry *entry, void *data);
2350 static ptid_t linux_wait_1 (ptid_t ptid,
2351 struct target_waitstatus *ourstatus,
2352 int target_options);
2353
2354 /* Stabilize threads (move out of jump pads).
2355
2356 If a thread is midway collecting a fast tracepoint, we need to
2357 finish the collection and move it out of the jump pad before
2358 reporting the signal.
2359
2360 This avoids recursion while collecting (when a signal arrives
2361 midway, and the signal handler itself collects), which would trash
2362 the trace buffer. In case the user set a breakpoint in a signal
2363 handler, this avoids the backtrace showing the jump pad, etc..
2364 Most importantly, there are certain things we can't do safely if
2365 threads are stopped in a jump pad (or in its callee's). For
2366 example:
2367
2368 - starting a new trace run. A thread still collecting the
2369 previous run, could trash the trace buffer when resumed. The trace
2370 buffer control structures would have been reset but the thread had
2371 no way to tell. The thread could even be midway through memcpy'ing
2372 to the buffer, which would mean that when resumed, it would clobber
2373 the trace buffer that had been set for a new run.
2374
2375 - we can't rewrite/reuse the jump pads for new tracepoints
2376 safely. Say you do tstart while a thread is stopped midway through
2377 collecting. When the thread is later resumed, it finishes the
2378 collection, and returns to the jump pad, to execute the original
2379 instruction that was under the tracepoint jump at the time the
2380 older run had been started. If the jump pad had been rewritten
2381 since for something else in the new run, the thread would now
2382 execute the wrong / random instructions. */
2383
2384 static void
2385 linux_stabilize_threads (void)
2386 {
2387 struct thread_info *save_inferior;
2388 struct thread_info *thread_stuck;
2389
2390 thread_stuck
2391 = (struct thread_info *) find_inferior (&all_threads,
2392 stuck_in_jump_pad_callback,
2393 NULL);
2394 if (thread_stuck != NULL)
2395 {
2396 if (debug_threads)
2397 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2398 lwpid_of (thread_stuck));
2399 return;
2400 }
2401
2402 save_inferior = current_inferior;
2403
2404 stabilizing_threads = 1;
2405
2406 /* Kick 'em all. */
2407 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2408
2409 /* Loop until all are stopped out of the jump pads. */
2410 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2411 {
2412 struct target_waitstatus ourstatus;
2413 struct lwp_info *lwp;
2414 int wstat;
2415
2416 /* Note that we go through the full wait event loop. While
2417 moving threads out of the jump pad, we need to be able to step
2418 over internal breakpoints and such. */
2419 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2420
2421 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2422 {
2423 lwp = get_thread_lwp (current_inferior);
2424
2425 /* Lock it. */
2426 lwp->suspended++;
2427
2428 if (ourstatus.value.sig != GDB_SIGNAL_0
2429 || current_inferior->last_resume_kind == resume_stop)
2430 {
2431 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2432 enqueue_one_deferred_signal (lwp, &wstat);
2433 }
2434 }
2435 }
2436
2437 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2438
2439 stabilizing_threads = 0;
2440
2441 current_inferior = save_inferior;
2442
2443 if (debug_threads)
2444 {
2445 thread_stuck
2446 = (struct thread_info *) find_inferior (&all_threads,
2447 stuck_in_jump_pad_callback,
2448 NULL);
2449 if (thread_stuck != NULL)
2450 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2451 lwpid_of (thread_stuck));
2452 }
2453 }
2454
2455 /* Wait for process, returns status. */
2456
2457 static ptid_t
2458 linux_wait_1 (ptid_t ptid,
2459 struct target_waitstatus *ourstatus, int target_options)
2460 {
2461 int w;
2462 struct lwp_info *event_child;
2463 int options;
2464 int pid;
2465 int step_over_finished;
2466 int bp_explains_trap;
2467 int maybe_internal_trap;
2468 int report_to_gdb;
2469 int trace_event;
2470 int in_step_range;
2471
2472 if (debug_threads)
2473 {
2474 debug_enter ();
2475 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2476 }
2477
2478 /* Translate generic target options into linux options. */
2479 options = __WALL;
2480 if (target_options & TARGET_WNOHANG)
2481 options |= WNOHANG;
2482
2483 retry:
2484 bp_explains_trap = 0;
2485 trace_event = 0;
2486 in_step_range = 0;
2487 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2488
2489 /* If we were only supposed to resume one thread, only wait for
2490 that thread - if it's still alive. If it died, however - which
2491 can happen if we're coming from the thread death case below -
2492 then we need to make sure we restart the other threads. We could
2493 pick a thread at random or restart all; restarting all is less
2494 arbitrary. */
2495 if (!non_stop
2496 && !ptid_equal (cont_thread, null_ptid)
2497 && !ptid_equal (cont_thread, minus_one_ptid))
2498 {
2499 struct thread_info *thread;
2500
2501 thread = (struct thread_info *) find_inferior_id (&all_threads,
2502 cont_thread);
2503
2504 /* No stepping, no signal - unless one is pending already, of course. */
2505 if (thread == NULL)
2506 {
2507 struct thread_resume resume_info;
2508 resume_info.thread = minus_one_ptid;
2509 resume_info.kind = resume_continue;
2510 resume_info.sig = 0;
2511 linux_resume (&resume_info, 1);
2512 }
2513 else
2514 ptid = cont_thread;
2515 }
2516
2517 if (ptid_equal (step_over_bkpt, null_ptid))
2518 pid = linux_wait_for_event (ptid, &w, options);
2519 else
2520 {
2521 if (debug_threads)
2522 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2523 target_pid_to_str (step_over_bkpt));
2524 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2525 }
2526
2527 if (pid == 0)
2528 {
2529 gdb_assert (target_options & TARGET_WNOHANG);
2530
2531 if (debug_threads)
2532 {
2533 debug_printf ("linux_wait_1 ret = null_ptid, "
2534 "TARGET_WAITKIND_IGNORE\n");
2535 debug_exit ();
2536 }
2537
2538 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2539 return null_ptid;
2540 }
2541 else if (pid == -1)
2542 {
2543 if (debug_threads)
2544 {
2545 debug_printf ("linux_wait_1 ret = null_ptid, "
2546 "TARGET_WAITKIND_NO_RESUMED\n");
2547 debug_exit ();
2548 }
2549
2550 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2551 return null_ptid;
2552 }
2553
2554 event_child = get_thread_lwp (current_inferior);
2555
2556 /* linux_wait_for_event only returns an exit status for the last
2557 child of a process. Report it. */
2558 if (WIFEXITED (w) || WIFSIGNALED (w))
2559 {
2560 if (WIFEXITED (w))
2561 {
2562 ourstatus->kind = TARGET_WAITKIND_EXITED;
2563 ourstatus->value.integer = WEXITSTATUS (w);
2564
2565 if (debug_threads)
2566 {
2567 debug_printf ("linux_wait_1 ret = %s, exited with "
2568 "retcode %d\n",
2569 target_pid_to_str (ptid_of (current_inferior)),
2570 WEXITSTATUS (w));
2571 debug_exit ();
2572 }
2573 }
2574 else
2575 {
2576 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2577 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2578
2579 if (debug_threads)
2580 {
2581 debug_printf ("linux_wait_1 ret = %s, terminated with "
2582 "signal %d\n",
2583 target_pid_to_str (ptid_of (current_inferior)),
2584 WTERMSIG (w));
2585 debug_exit ();
2586 }
2587 }
2588
2589 return ptid_of (current_inferior);
2590 }
2591
2592 /* If this event was not handled before, and is not a SIGTRAP, we
2593 report it. SIGILL and SIGSEGV are also treated as traps in case
2594 a breakpoint is inserted at the current PC. If this target does
2595 not support internal breakpoints at all, we also report the
2596 SIGTRAP without further processing; it's of no concern to us. */
2597 maybe_internal_trap
2598 = (supports_breakpoints ()
2599 && (WSTOPSIG (w) == SIGTRAP
2600 || ((WSTOPSIG (w) == SIGILL
2601 || WSTOPSIG (w) == SIGSEGV)
2602 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2603
2604 if (maybe_internal_trap)
2605 {
2606 /* Handle anything that requires bookkeeping before deciding to
2607 report the event or continue waiting. */
2608
2609 /* First check if we can explain the SIGTRAP with an internal
2610 breakpoint, or if we should possibly report the event to GDB.
2611 Do this before anything that may remove or insert a
2612 breakpoint. */
2613 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2614
2615 /* We have a SIGTRAP, possibly a step-over dance has just
2616 finished. If so, tweak the state machine accordingly,
2617 reinsert breakpoints and delete any reinsert (software
2618 single-step) breakpoints. */
2619 step_over_finished = finish_step_over (event_child);
2620
2621 /* Now invoke the callbacks of any internal breakpoints there. */
2622 check_breakpoints (event_child->stop_pc);
2623
2624 /* Handle tracepoint data collecting. This may overflow the
2625 trace buffer, and cause a tracing stop, removing
2626 breakpoints. */
2627 trace_event = handle_tracepoints (event_child);
2628
2629 if (bp_explains_trap)
2630 {
2631 /* If we stepped or ran into an internal breakpoint, we've
2632 already handled it. So next time we resume (from this
2633 PC), we should step over it. */
2634 if (debug_threads)
2635 debug_printf ("Hit a gdbserver breakpoint.\n");
2636
2637 if (breakpoint_here (event_child->stop_pc))
2638 event_child->need_step_over = 1;
2639 }
2640 }
2641 else
2642 {
2643 /* We have some other signal, possibly a step-over dance was in
2644 progress, and it should be cancelled too. */
2645 step_over_finished = finish_step_over (event_child);
2646 }
2647
2648 /* We have all the data we need. Either report the event to GDB, or
2649 resume threads and keep waiting for more. */
2650
2651 /* If we're collecting a fast tracepoint, finish the collection and
2652 move out of the jump pad before delivering a signal. See
2653 linux_stabilize_threads. */
2654
2655 if (WIFSTOPPED (w)
2656 && WSTOPSIG (w) != SIGTRAP
2657 && supports_fast_tracepoints ()
2658 && agent_loaded_p ())
2659 {
2660 if (debug_threads)
2661 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2662 "to defer or adjust it.\n",
2663 WSTOPSIG (w), lwpid_of (current_inferior));
2664
2665 /* Allow debugging the jump pad itself. */
2666 if (current_inferior->last_resume_kind != resume_step
2667 && maybe_move_out_of_jump_pad (event_child, &w))
2668 {
2669 enqueue_one_deferred_signal (event_child, &w);
2670
2671 if (debug_threads)
2672 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2673 WSTOPSIG (w), lwpid_of (current_inferior));
2674
2675 linux_resume_one_lwp (event_child, 0, 0, NULL);
2676 goto retry;
2677 }
2678 }
2679
2680 if (event_child->collecting_fast_tracepoint)
2681 {
2682 if (debug_threads)
2683 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2684 "Check if we're already there.\n",
2685 lwpid_of (current_inferior),
2686 event_child->collecting_fast_tracepoint);
2687
2688 trace_event = 1;
2689
2690 event_child->collecting_fast_tracepoint
2691 = linux_fast_tracepoint_collecting (event_child, NULL);
2692
2693 if (event_child->collecting_fast_tracepoint != 1)
2694 {
2695 /* No longer need this breakpoint. */
2696 if (event_child->exit_jump_pad_bkpt != NULL)
2697 {
2698 if (debug_threads)
2699 debug_printf ("No longer need exit-jump-pad bkpt; removing it;"
2700 " stopping all threads momentarily.\n");
2701
2702 /* Other running threads could hit this breakpoint.
2703 We don't handle moribund locations like GDB does;
2704 instead we always pause all threads when removing
2705 breakpoints, so that any step-over or
2706 decr_pc_after_break adjustment is always taken
2707 care of while the breakpoint is still
2708 inserted. */
2709 stop_all_lwps (1, event_child);
2710 cancel_breakpoints ();
2711
2712 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2713 event_child->exit_jump_pad_bkpt = NULL;
2714
2715 unstop_all_lwps (1, event_child);
2716
2717 gdb_assert (event_child->suspended >= 0);
2718 }
2719 }
2720
2721 if (event_child->collecting_fast_tracepoint == 0)
2722 {
2723 if (debug_threads)
2724 debug_printf ("fast tracepoint finished "
2725 "collecting successfully.\n");
2726
2727 /* We may have a deferred signal to report. */
2728 if (dequeue_one_deferred_signal (event_child, &w))
2729 {
2730 if (debug_threads)
2731 debug_printf ("dequeued one signal.\n");
2732 }
2733 else
2734 {
2735 if (debug_threads)
2736 debug_printf ("no deferred signals.\n");
2737
2738 if (stabilizing_threads)
2739 {
2740 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2741 ourstatus->value.sig = GDB_SIGNAL_0;
2742
2743 if (debug_threads)
2744 {
2745 debug_printf ("linux_wait_1 ret = %s, stopped "
2746 "while stabilizing threads\n",
2747 target_pid_to_str (ptid_of (current_inferior)));
2748 debug_exit ();
2749 }
2750
2751 return ptid_of (current_inferior);
2752 }
2753 }
2754 }
2755 }
2756
2757 /* Check whether GDB would be interested in this event. */
2758
2759 /* If GDB is not interested in this signal, don't stop other
2760 threads, and don't report it to GDB. Just resume the inferior
2761 right away. We do this for threading-related signals as well as
2762 any that GDB specifically requested we ignore. But never ignore
2763 SIGSTOP if we sent it ourselves, and do not ignore signals when
2764 stepping - they may require special handling to skip the signal
2765 handler. */
2766 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2767 thread library? */
2768 if (WIFSTOPPED (w)
2769 && current_inferior->last_resume_kind != resume_step
2770 && (
2771 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2772 (current_process ()->private->thread_db != NULL
2773 && (WSTOPSIG (w) == __SIGRTMIN
2774 || WSTOPSIG (w) == __SIGRTMIN + 1))
2775 ||
2776 #endif
2777 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2778 && !(WSTOPSIG (w) == SIGSTOP
2779 && current_inferior->last_resume_kind == resume_stop))))
2780 {
2781 siginfo_t info, *info_p;
2782
2783 if (debug_threads)
2784 debug_printf ("Ignored signal %d for LWP %ld.\n",
2785 WSTOPSIG (w), lwpid_of (current_inferior));
2786
2787 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
2788 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2789 info_p = &info;
2790 else
2791 info_p = NULL;
2792 linux_resume_one_lwp (event_child, event_child->stepping,
2793 WSTOPSIG (w), info_p);
2794 goto retry;
2795 }
2796
2797 /* Note that all addresses are always "out of the step range" when
2798 there's no range to begin with. */
2799 in_step_range = lwp_in_step_range (event_child);
2800
2801 /* If GDB wanted this thread to single step, and the thread is out
2802 of the step range, we always want to report the SIGTRAP, and let
2803 GDB handle it. Watchpoints should always be reported. So should
2804 signals we can't explain. A SIGTRAP we can't explain could be a
2805 GDB breakpoint --- we may or may not support Z0 breakpoints. If
2806 we do, we'll be able to handle GDB breakpoints on top of internal
2807 breakpoints, by handling the internal breakpoint and still
2808 reporting the event to GDB. If we don't, we're out of luck; GDB
2809 won't see the breakpoint hit. */
2810 report_to_gdb = (!maybe_internal_trap
2811 || (current_inferior->last_resume_kind == resume_step
2812 && !in_step_range)
2813 || event_child->stopped_by_watchpoint
2814 || (!step_over_finished && !in_step_range
2815 && !bp_explains_trap && !trace_event)
2816 || (gdb_breakpoint_here (event_child->stop_pc)
2817 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2818 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2819
2820 run_breakpoint_commands (event_child->stop_pc);
2821
2822 /* We found no reason GDB would want us to stop. We either hit one
2823 of our own breakpoints, or finished an internal step GDB
2824 shouldn't know about. */
2825 if (!report_to_gdb)
2826 {
2827 if (debug_threads)
2828 {
2829 if (bp_explains_trap)
2830 debug_printf ("Hit a gdbserver breakpoint.\n");
2831 if (step_over_finished)
2832 debug_printf ("Step-over finished.\n");
2833 if (trace_event)
2834 debug_printf ("Tracepoint event.\n");
2835 if (lwp_in_step_range (event_child))
2836 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2837 paddress (event_child->stop_pc),
2838 paddress (event_child->step_range_start),
2839 paddress (event_child->step_range_end));
2840 }
2841
2842 /* We're not reporting this breakpoint to GDB, so apply the
2843 decr_pc_after_break adjustment to the inferior's regcache
2844 ourselves. */
2845
2846 if (the_low_target.set_pc != NULL)
2847 {
2848 struct regcache *regcache
2849 = get_thread_regcache (current_inferior, 1);
2850 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2851 }
2852
2853 /* We may have finished stepping over a breakpoint. If so,
2854 we've stopped and suspended all LWPs momentarily except the
2855 stepping one. This is where we resume them all again. We're
2856 going to keep waiting, so use proceed, which handles stepping
2857 over the next breakpoint. */
2858 if (debug_threads)
2859 debug_printf ("proceeding all threads.\n");
2860
2861 if (step_over_finished)
2862 unsuspend_all_lwps (event_child);
2863
2864 proceed_all_lwps ();
2865 goto retry;
2866 }
2867
2868 if (debug_threads)
2869 {
2870 if (current_inferior->last_resume_kind == resume_step)
2871 {
2872 if (event_child->step_range_start == event_child->step_range_end)
2873 debug_printf ("GDB wanted to single-step, reporting event.\n");
2874 else if (!lwp_in_step_range (event_child))
2875 debug_printf ("Out of step range, reporting event.\n");
2876 }
2877 if (event_child->stopped_by_watchpoint)
2878 debug_printf ("Stopped by watchpoint.\n");
2879 if (gdb_breakpoint_here (event_child->stop_pc))
2880 debug_printf ("Stopped by GDB breakpoint.\n");
2881 debug_printf ("Hit a non-gdbserver trap event.\n");
2883 }
2884
2885 /* Alright, we're going to report a stop. */
2886
2887 if (!non_stop && !stabilizing_threads)
2888 {
2889 /* In all-stop, stop all threads. */
2890 stop_all_lwps (0, NULL);
2891
2892 /* If we're not waiting for a specific LWP, choose an event LWP
2893 from among those that have had events. Giving equal priority
2894 to all LWPs that have had events helps prevent
2895 starvation. */
2896 if (ptid_equal (ptid, minus_one_ptid))
2897 {
2898 event_child->status_pending_p = 1;
2899 event_child->status_pending = w;
2900
2901 select_event_lwp (&event_child);
2902
2903 /* current_inferior and event_child must stay in sync. */
2904 current_inferior = get_lwp_thread (event_child);
2905
2906 event_child->status_pending_p = 0;
2907 w = event_child->status_pending;
2908 }
2909
2910 /* Now that we've selected our final event LWP, cancel any
2911 breakpoints in other LWPs that have hit a GDB breakpoint.
2912 See the comment in cancel_breakpoints_callback to find out
2913 why. */
2914 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
2915
2916 /* If we were doing a step-over, all other threads but the stepping one
2917 had been paused in start_step_over, with their suspend counts
2918 incremented. We don't want to do a full unstop/unpause, because we're
2919 in all-stop mode (so we want threads stopped), but we still need to
2920 unsuspend the other threads, to decrement their `suspended' count
2921 back. */
2922 if (step_over_finished)
2923 unsuspend_all_lwps (event_child);
2924
2925 /* Stabilize threads (move out of jump pads). */
2926 stabilize_threads ();
2927 }
2928 else
2929 {
2930 /* If we just finished a step-over, then all threads had been
2931 momentarily paused. In all-stop, that's fine, we want
2932 threads stopped by now anyway. In non-stop, we need to
2933 re-resume threads that GDB wanted to be running. */
2934 if (step_over_finished)
2935 unstop_all_lwps (1, event_child);
2936 }
2937
2938 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2939
2940 if (current_inferior->last_resume_kind == resume_stop
2941 && WSTOPSIG (w) == SIGSTOP)
2942 {
2943 /* A thread that has been requested to stop by GDB with vCont;t
2944 stopped cleanly, so report it as SIG0. The use of
2945 SIGSTOP is an implementation detail. */
2946 ourstatus->value.sig = GDB_SIGNAL_0;
2947 }
2948 else if (current_inferior->last_resume_kind == resume_stop
2949 && WSTOPSIG (w) != SIGSTOP)
2950 {
2951 /* A thread that has been requested to stop by GDB with vCont;t,
2952 but it stopped for other reasons. */
2953 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2954 }
2955 else
2956 {
2957 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2958 }
2959
2960 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2961
2962 if (debug_threads)
2963 {
2964 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2965 target_pid_to_str (ptid_of (current_inferior)),
2966 ourstatus->kind, ourstatus->value.sig);
2967 debug_exit ();
2968 }
2969
2970 return ptid_of (current_inferior);
2971 }
2972
2973 /* Get rid of any pending event in the pipe. */
2974 static void
2975 async_file_flush (void)
2976 {
2977 int ret;
2978 char buf;
2979
2980 do
2981 ret = read (linux_event_pipe[0], &buf, 1);
2982 while (ret >= 0 || (ret == -1 && errno == EINTR));
2983 }
2984
2985 /* Put something in the pipe, so the event loop wakes up. */
2986 static void
2987 async_file_mark (void)
2988 {
2989 int ret;
2990
2991 async_file_flush ();
2992
2993 do
2994 ret = write (linux_event_pipe[1], "+", 1);
2995 while (ret == 0 || (ret == -1 && errno == EINTR));
2996
2997 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2998 be awakened anyway. */
2999 }
3000
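/* Both helpers above assume linux_event_pipe is a pipe whose two ends
   were made non-blocking when async mode was enabled, so the flush
   loop terminates with EAGAIN rather than blocking on an empty pipe.
   A sketch of that setup (illustrative; the real code lives in this
   target's async hook):

     int fds[2];

     if (pipe (fds) == 0)
       {
         fcntl (fds[0], F_SETFL, O_NONBLOCK);
         fcntl (fds[1], F_SETFL, O_NONBLOCK);
       }

   fds[0] is the read end the event loop selects on; fds[1] is the end
   async_file_mark writes to.  */
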
3001 static ptid_t
3002 linux_wait (ptid_t ptid,
3003 struct target_waitstatus *ourstatus, int target_options)
3004 {
3005 ptid_t event_ptid;
3006
3007 /* Flush the async file first. */
3008 if (target_is_async_p ())
3009 async_file_flush ();
3010
3011 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3012
3013 /* If at least one stop was reported, there may be more. A single
3014 SIGCHLD can signal more than one child stop. */
3015 if (target_is_async_p ()
3016 && (target_options & TARGET_WNOHANG) != 0
3017 && !ptid_equal (event_ptid, null_ptid))
3018 async_file_mark ();
3019
3020 return event_ptid;
3021 }
3022
3023 /* Send a signal to an LWP. */
3024
3025 static int
3026 kill_lwp (unsigned long lwpid, int signo)
3027 {
3028 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3029 fails, then we are not using nptl threads and we should be using kill. */
3030
3031 #ifdef __NR_tkill
3032 {
3033 static int tkill_failed;
3034
3035 if (!tkill_failed)
3036 {
3037 int ret;
3038
3039 errno = 0;
3040 ret = syscall (__NR_tkill, lwpid, signo);
3041 if (errno != ENOSYS)
3042 return ret;
3043 tkill_failed = 1;
3044 }
3045 }
3046 #endif
3047
3048 return kill (lwpid, signo);
3049 }
3050
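/* As with kill, a signal number of 0 performs error checking without
   delivering anything, so kill_lwp can also probe whether a specific
   thread still exists.  An illustrative sketch, with LWPID a
   hypothetical thread id:

     int alive = 0;

     errno = 0;
     if (kill_lwp (lwpid, 0) == 0 || errno != ESRCH)
       alive = 1;

   ESRCH means the kernel no longer knows the LWP; note that a zombie
   thread still counts as existing.  */
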
3051 void
3052 linux_stop_lwp (struct lwp_info *lwp)
3053 {
3054 send_sigstop (lwp);
3055 }
3056
3057 static void
3058 send_sigstop (struct lwp_info *lwp)
3059 {
3060 int pid;
3061
3062 pid = lwpid_of (get_lwp_thread (lwp));
3063
3064 /* If we already have a pending stop signal for this LWP, don't
3065 send another. */
3066 if (lwp->stop_expected)
3067 {
3068 if (debug_threads)
3069 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3070
3071 return;
3072 }
3073
3074 if (debug_threads)
3075 debug_printf ("Sending sigstop to lwp %d\n", pid);
3076
3077 lwp->stop_expected = 1;
3078 kill_lwp (pid, SIGSTOP);
3079 }
3080
3081 static int
3082 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3083 {
3084 struct thread_info *thread = (struct thread_info *) entry;
3085 struct lwp_info *lwp = get_thread_lwp (thread);
3086
3087 /* Ignore EXCEPT. */
3088 if (lwp == except)
3089 return 0;
3090
3091 if (lwp->stopped)
3092 return 0;
3093
3094 send_sigstop (lwp);
3095 return 0;
3096 }
3097
3098 /* Increment the suspend count of an LWP, and stop it, if not stopped
3099 yet. */
3100 static int
3101 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3102 void *except)
3103 {
3104 struct thread_info *thread = (struct thread_info *) entry;
3105 struct lwp_info *lwp = get_thread_lwp (thread);
3106
3107 /* Ignore EXCEPT. */
3108 if (lwp == except)
3109 return 0;
3110
3111 lwp->suspended++;
3112
3113 return send_sigstop_callback (entry, except);
3114 }
3115
3116 static void
3117 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3118 {
3119 /* It's dead, really. */
3120 lwp->dead = 1;
3121
3122 /* Store the exit status for later. */
3123 lwp->status_pending_p = 1;
3124 lwp->status_pending = wstat;
3125
3126 /* Prevent trying to stop it. */
3127 lwp->stopped = 1;
3128
3129 /* No further stops are expected from a dead lwp. */
3130 lwp->stop_expected = 0;
3131 }
3132
3133 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3134
3135 static void
3136 wait_for_sigstop (void)
3137 {
3138 struct thread_info *saved_inferior;
3139 ptid_t saved_tid;
3140 int wstat;
3141 int ret;
3142
3143 saved_inferior = current_inferior;
3144 if (saved_inferior != NULL)
3145 saved_tid = saved_inferior->entry.id;
3146 else
3147 saved_tid = null_ptid; /* avoid bogus unused warning */
3148
3149 if (debug_threads)
3150 debug_printf ("wait_for_sigstop: pulling events\n");
3151
3152 /* Passing NULL_PTID as filter indicates we want all events to be
3153 left pending. Eventually this returns when there are no
3154 unwaited-for children left. */
3155 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3156 &wstat, __WALL);
3157 gdb_assert (ret == -1);
3158
3159 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3160 current_inferior = saved_inferior;
3161 else
3162 {
3163 if (debug_threads)
3164 debug_printf ("Previously current thread died.\n");
3165
3166 if (non_stop)
3167 {
3168 /* We can't change the current inferior behind GDB's back,
3169 otherwise, a subsequent command may apply to the wrong
3170 process. */
3171 current_inferior = NULL;
3172 }
3173 else
3174 {
3175 /* Set a valid thread as current. */
3176 set_desired_inferior (0);
3177 }
3178 }
3179 }
3180
3181 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3182 move it out, because we need to report the stop event to GDB. For
3183 example, if the user puts a breakpoint in the jump pad, it's
3184 because she wants to debug it. */
3185
3186 static int
3187 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3188 {
3189 struct thread_info *thread = (struct thread_info *) entry;
3190 struct lwp_info *lwp = get_thread_lwp (thread);
3191
3192 gdb_assert (lwp->suspended == 0);
3193 gdb_assert (lwp->stopped);
3194
3195 /* Allow debugging the jump pad, gdb_collect, etc.. */
3196 return (supports_fast_tracepoints ()
3197 && agent_loaded_p ()
3198 && (gdb_breakpoint_here (lwp->stop_pc)
3199 || lwp->stopped_by_watchpoint
3200 || thread->last_resume_kind == resume_step)
3201 && linux_fast_tracepoint_collecting (lwp, NULL));
3202 }
3203
3204 static void
3205 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3206 {
3207 struct thread_info *thread = (struct thread_info *) entry;
3208 struct lwp_info *lwp = get_thread_lwp (thread);
3209 int *wstat;
3210
3211 gdb_assert (lwp->suspended == 0);
3212 gdb_assert (lwp->stopped);
3213
3214 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3215
3216 /* Allow debugging the jump pad, gdb_collect, etc. */
3217 if (!gdb_breakpoint_here (lwp->stop_pc)
3218 && !lwp->stopped_by_watchpoint
3219 && thread->last_resume_kind != resume_step
3220 && maybe_move_out_of_jump_pad (lwp, wstat))
3221 {
3222 if (debug_threads)
3223 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3224 lwpid_of (thread));
3225
3226 if (wstat)
3227 {
3228 lwp->status_pending_p = 0;
3229 enqueue_one_deferred_signal (lwp, wstat);
3230
3231 if (debug_threads)
3232 debug_printf ("Signal %d for LWP %ld deferred "
3233 "(in jump pad)\n",
3234 WSTOPSIG (*wstat), lwpid_of (thread));
3235 }
3236
3237 linux_resume_one_lwp (lwp, 0, 0, NULL);
3238 }
3239 else
3240 lwp->suspended++;
3241 }
3242
3243 static int
3244 lwp_running (struct inferior_list_entry *entry, void *data)
3245 {
3246 struct thread_info *thread = (struct thread_info *) entry;
3247 struct lwp_info *lwp = get_thread_lwp (thread);
3248
3249 if (lwp->dead)
3250 return 0;
3251 if (lwp->stopped)
3252 return 0;
3253 return 1;
3254 }
3255
3256 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3257 If SUSPEND, then also increase the suspend count of every LWP,
3258 except EXCEPT. */
3259
3260 static void
3261 stop_all_lwps (int suspend, struct lwp_info *except)
3262 {
3263 /* Should not be called recursively. */
3264 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3265
3266 if (debug_threads)
3267 {
3268 debug_enter ();
3269 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3270 suspend ? "stop-and-suspend" : "stop",
3271 except != NULL
3272 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3273 : "none");
3274 }
3275
3276 stopping_threads = (suspend
3277 ? STOPPING_AND_SUSPENDING_THREADS
3278 : STOPPING_THREADS);
3279
3280 if (suspend)
3281 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3282 else
3283 find_inferior (&all_threads, send_sigstop_callback, except);
3284 wait_for_sigstop ();
3285 stopping_threads = NOT_STOPPING_THREADS;
3286
3287 if (debug_threads)
3288 {
3289 debug_printf ("stop_all_lwps done, setting stopping_threads "
3290 "back to !stopping\n");
3291 debug_exit ();
3292 }
3293 }
3294
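/* Callers pair stop_all_lwps with unstop_all_lwps, passing a matching
   SUSPEND/UNSUSPEND argument, as in the fast tracepoint path in
   linux_wait_1 above:

     stop_all_lwps (1, event_child);
     delete_breakpoint (event_child->exit_jump_pad_bkpt);
     unstop_all_lwps (1, event_child);

   so every suspend count bumped here is dropped again afterwards.  */
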
3295 /* Resume execution of LWP.
3296 If STEP is nonzero, single-step it.
3297 If SIGNAL is nonzero, give it that signal. */
3298
3299 static void
3300 linux_resume_one_lwp (struct lwp_info *lwp,
3301 int step, int signal, siginfo_t *info)
3302 {
3303 struct thread_info *thread = get_lwp_thread (lwp);
3304 struct thread_info *saved_inferior;
3305 int fast_tp_collecting;
3306
3307 if (lwp->stopped == 0)
3308 return;
3309
3310 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3311
3312 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3313
3314 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3315 user used the "jump" command, or "set $pc = foo"). */
3316 if (lwp->stop_pc != get_pc (lwp))
3317 {
3318 /* Collecting 'while-stepping' actions doesn't make sense
3319 anymore. */
3320 release_while_stepping_state_list (thread);
3321 }
3322
3323 /* If we have pending signals or status, and a new signal, enqueue the
3324 signal. Also enqueue the signal if we are waiting to reinsert a
3325 breakpoint; it will be picked up again below. */
3326 if (signal != 0
3327 && (lwp->status_pending_p
3328 || lwp->pending_signals != NULL
3329 || lwp->bp_reinsert != 0
3330 || fast_tp_collecting))
3331 {
3332 struct pending_signals *p_sig;
3333 p_sig = xmalloc (sizeof (*p_sig));
3334 p_sig->prev = lwp->pending_signals;
3335 p_sig->signal = signal;
3336 if (info == NULL)
3337 memset (&p_sig->info, 0, sizeof (siginfo_t));
3338 else
3339 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3340 lwp->pending_signals = p_sig;
3341 }
3342
3343 if (lwp->status_pending_p)
3344 {
3345 if (debug_threads)
3346 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3347 " has pending status\n",
3348 lwpid_of (thread), step ? "step" : "continue", signal,
3349 lwp->stop_expected ? "expected" : "not expected");
3350 return;
3351 }
3352
3353 saved_inferior = current_inferior;
3354 current_inferior = thread;
3355
3356 if (debug_threads)
3357 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3358 lwpid_of (thread), step ? "step" : "continue", signal,
3359 lwp->stop_expected ? "expected" : "not expected");
3360
3361 /* This bit needs some thinking about. If we get a signal that
3362 we must report while a single-step reinsert is still pending,
3363 we often end up resuming the thread. It might be better to
3364 (ew) allow a stack of pending events; then we could be sure that
3365 the reinsert happened right away and not lose any signals.
3366
3367 Making this stack would also shrink the window in which breakpoints are
3368 uninserted (see comment in linux_wait_for_lwp) but not enough for
3369 complete correctness, so it won't solve that problem. It may be
3370 worthwhile just to solve this one, however. */
3371 if (lwp->bp_reinsert != 0)
3372 {
3373 if (debug_threads)
3374 debug_printf (" pending reinsert at 0x%s\n",
3375 paddress (lwp->bp_reinsert));
3376
3377 if (can_hardware_single_step ())
3378 {
3379 if (fast_tp_collecting == 0)
3380 {
3381 if (step == 0)
3382 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3383 if (lwp->suspended)
3384 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3385 lwp->suspended);
3386 }
3387
3388 step = 1;
3389 }
3390
3391 /* Postpone any pending signal. It was enqueued above. */
3392 signal = 0;
3393 }
3394
3395 if (fast_tp_collecting == 1)
3396 {
3397 if (debug_threads)
3398 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3399 " (exit-jump-pad-bkpt)\n",
3400 lwpid_of (thread));
3401
3402 /* Postpone any pending signal. It was enqueued above. */
3403 signal = 0;
3404 }
3405 else if (fast_tp_collecting == 2)
3406 {
3407 if (debug_threads)
3408 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3409 " single-stepping\n",
3410 lwpid_of (thread));
3411
3412 if (can_hardware_single_step ())
3413 step = 1;
3414 else
3415 fatal ("moving out of jump pad single-stepping"
3416 " not implemented on this target");
3417
3418 /* Postpone any pending signal. It was enqueued above. */
3419 signal = 0;
3420 }
3421
3422 /* If we have while-stepping actions in this thread, set it stepping.
3423 If we have a signal to deliver, it may or may not be set to
3424 SIG_IGN, we don't know. Assume so, and allow collecting
3425 while-stepping into a signal handler. A possible smart thing to
3426 do would be to set an internal breakpoint at the signal return
3427 address, continue, and carry on catching this while-stepping
3428 action only when that breakpoint is hit. A future
3429 enhancement. */
3430 if (thread->while_stepping != NULL
3431 && can_hardware_single_step ())
3432 {
3433 if (debug_threads)
3434 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3435 lwpid_of (thread));
3436 step = 1;
3437 }
3438
3439 if (debug_threads && the_low_target.get_pc != NULL)
3440 {
3441 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3442 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3443 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3444 }
3445
3446 /* If we have pending signals, consume one unless we are trying to
3447 reinsert a breakpoint or we're trying to finish a fast tracepoint
3448 collect. */
3449 if (lwp->pending_signals != NULL
3450 && lwp->bp_reinsert == 0
3451 && fast_tp_collecting == 0)
3452 {
3453 struct pending_signals **p_sig;
3454
3455 p_sig = &lwp->pending_signals;
3456 while ((*p_sig)->prev != NULL)
3457 p_sig = &(*p_sig)->prev;
3458
3459 signal = (*p_sig)->signal;
3460 if ((*p_sig)->info.si_signo != 0)
3461 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3462 &(*p_sig)->info);
3463
3464 free (*p_sig);
3465 *p_sig = NULL;
3466 }
3467
3468 if (the_low_target.prepare_to_resume != NULL)
3469 the_low_target.prepare_to_resume (lwp);
3470
3471 regcache_invalidate_thread (thread);
3472 errno = 0;
3473 lwp->stopped = 0;
3474 lwp->stopped_by_watchpoint = 0;
3475 lwp->stepping = step;
3476 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3477 (PTRACE_TYPE_ARG3) 0,
3478 /* Coerce to a uintptr_t first to avoid potential gcc warning
3479 of coercing an 8 byte integer to a 4 byte pointer. */
3480 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3481
3482 current_inferior = saved_inferior;
3483 if (errno)
3484 {
3485 /* ESRCH from ptrace either means that the thread was already
3486 running (an error) or that it is gone (a race condition). If
3487 it's gone, we will get a notification the next time we wait,
3488 so we can ignore the error. We could differentiate these
3489 two, but it's tricky without waiting; the thread still exists
3490 as a zombie, so sending it signal 0 would succeed. So just
3491 ignore ESRCH. */
3492 if (errno == ESRCH)
3493 return;
3494
3495 perror_with_name ("ptrace");
3496 }
3497 }
3498
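/* The pending_signals list handled above is linked newest-first:
   enqueueing pushes at the head through PREV, while the consumer walks
   to the entry whose PREV is NULL, so signals are delivered in FIFO
   order overall.  A reduced sketch of the two halves:

     struct pending_signals *p_sig = xmalloc (sizeof (*p_sig));
     p_sig->prev = lwp->pending_signals;
     lwp->pending_signals = p_sig;

     struct pending_signals **oldest = &lwp->pending_signals;
     while ((*oldest)->prev != NULL)
       oldest = &(*oldest)->prev;

   The first fragment records a new signal; the second finds the
   oldest one to deliver next.  */
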
3499 struct thread_resume_array
3500 {
3501 struct thread_resume *resume;
3502 size_t n;
3503 };
3504
3505 /* This function is called once per thread via find_inferior.
3506 ARG is a pointer to a thread_resume_array struct.
3507 We look up the thread specified by ENTRY in ARG, and mark the thread
3508 with a pointer to the appropriate resume request.
3509
3510 This algorithm is O(threads * resume elements), but the number of
3511 resume elements is small (and will remain small at least until GDB
3512 supports thread suspension). */
3513
3514 static int
3515 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3516 {
3517 struct thread_info *thread = (struct thread_info *) entry;
3518 struct lwp_info *lwp = get_thread_lwp (thread);
3519 int ndx;
3520 struct thread_resume_array *r;
3521
3522 r = arg;
3523
3524 for (ndx = 0; ndx < r->n; ndx++)
3525 {
3526 ptid_t ptid = r->resume[ndx].thread;
3527 if (ptid_equal (ptid, minus_one_ptid)
3528 || ptid_equal (ptid, entry->id)
3529 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3530 of PID'. */
3531 || (ptid_get_pid (ptid) == pid_of (thread)
3532 && (ptid_is_pid (ptid)
3533 || ptid_get_lwp (ptid) == -1)))
3534 {
3535 if (r->resume[ndx].kind == resume_stop
3536 && thread->last_resume_kind == resume_stop)
3537 {
3538 if (debug_threads)
3539 debug_printf ("already %s LWP %ld at GDB's request\n",
3540 (thread->last_status.kind
3541 == TARGET_WAITKIND_STOPPED)
3542 ? "stopped"
3543 : "stopping",
3544 lwpid_of (thread));
3545
3546 continue;
3547 }
3548
3549 lwp->resume = &r->resume[ndx];
3550 thread->last_resume_kind = lwp->resume->kind;
3551
3552 lwp->step_range_start = lwp->resume->step_range_start;
3553 lwp->step_range_end = lwp->resume->step_range_end;
3554
3555 /* If we had a deferred signal to report, dequeue one now.
3556 This can happen if LWP gets more than one signal while
3557 trying to get out of a jump pad. */
3558 if (lwp->stopped
3559 && !lwp->status_pending_p
3560 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3561 {
3562 lwp->status_pending_p = 1;
3563
3564 if (debug_threads)
3565 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3566 "leaving status pending.\n",
3567 WSTOPSIG (lwp->status_pending),
3568 lwpid_of (thread));
3569 }
3570
3571 return 0;
3572 }
3573 }
3574
3575 /* No resume action for this thread. */
3576 lwp->resume = NULL;
3577
3578 return 0;
3579 }
3580
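/* For illustration, assuming a process with PID 1234 containing LWP
   1235 (hypothetical values), the matching above accepts:

     minus_one_ptid             - matches every thread;
     pid_to_ptid (1234)         - matches all LWPs of process 1234,
                                  as does ptid_build (1234, -1, 0);
     ptid_build (1234, 1235, 0) - matches only LWP 1235.  */
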
3581 /* find_inferior callback for linux_resume.
3582 Set *FLAG_P if this lwp has an interesting status pending. */
3583
3584 static int
3585 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3586 {
3587 struct thread_info *thread = (struct thread_info *) entry;
3588 struct lwp_info *lwp = get_thread_lwp (thread);
3589
3590 /* LWPs which will not be resumed are not interesting, because
3591 we might not wait for them next time through linux_wait. */
3592 if (lwp->resume == NULL)
3593 return 0;
3594
3595 if (lwp->status_pending_p)
3596 * (int *) flag_p = 1;
3597
3598 return 0;
3599 }
3600
3601 /* Return 1 if this lwp that GDB wants running is stopped at an
3602 internal breakpoint that we need to step over. It assumes that any
3603 required STOP_PC adjustment has already been propagated to the
3604 inferior's regcache. */
3605
3606 static int
3607 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3608 {
3609 struct thread_info *thread = (struct thread_info *) entry;
3610 struct lwp_info *lwp = get_thread_lwp (thread);
3611 struct thread_info *saved_inferior;
3612 CORE_ADDR pc;
3613
3614 /* LWPs which will not be resumed are not interesting, because we
3615 might not wait for them next time through linux_wait. */
3616
3617 if (!lwp->stopped)
3618 {
3619 if (debug_threads)
3620 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3621 lwpid_of (thread));
3622 return 0;
3623 }
3624
3625 if (thread->last_resume_kind == resume_stop)
3626 {
3627 if (debug_threads)
3628 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3629 " stopped\n",
3630 lwpid_of (thread));
3631 return 0;
3632 }
3633
3634 gdb_assert (lwp->suspended >= 0);
3635
3636 if (lwp->suspended)
3637 {
3638 if (debug_threads)
3639 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3640 lwpid_of (thread));
3641 return 0;
3642 }
3643
3644 if (!lwp->need_step_over)
3645 {
3646 if (debug_threads)
3647 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3648 }
3649
3650 if (lwp->status_pending_p)
3651 {
3652 if (debug_threads)
3653 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3654 " status.\n",
3655 lwpid_of (thread));
3656 return 0;
3657 }
3658
3659 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3660 or we have. */
3661 pc = get_pc (lwp);
3662
3663 /* If the PC has changed since we stopped, then don't do anything,
3664 and let the breakpoint/tracepoint be hit. This happens if, for
3665 instance, GDB handled the decr_pc_after_break subtraction itself,
3666 GDB is OOL stepping this thread, or the user has issued a "jump"
3667 command, or poked the thread's registers herself. */
3668 if (pc != lwp->stop_pc)
3669 {
3670 if (debug_threads)
3671 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3672 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3673 lwpid_of (thread),
3674 paddress (lwp->stop_pc), paddress (pc));
3675
3676 lwp->need_step_over = 0;
3677 return 0;
3678 }
3679
3680 saved_inferior = current_inferior;
3681 current_inferior = thread;
3682
3683 /* We can only step over breakpoints we know about. */
3684 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3685 {
3686 /* Don't step over a breakpoint that GDB expects to hit
3687 though. If the condition is being evaluated on the target's side
3688 and it evaluates to false, step over this breakpoint as well. */
3689 if (gdb_breakpoint_here (pc)
3690 && gdb_condition_true_at_breakpoint (pc)
3691 && gdb_no_commands_at_breakpoint (pc))
3692 {
3693 if (debug_threads)
3694 debug_printf ("Need step over [LWP %ld]? yes, but found"
3695 " GDB breakpoint at 0x%s; skipping step over\n",
3696 lwpid_of (thread), paddress (pc));
3697
3698 current_inferior = saved_inferior;
3699 return 0;
3700 }
3701 else
3702 {
3703 if (debug_threads)
3704 debug_printf ("Need step over [LWP %ld]? yes, "
3705 "found breakpoint at 0x%s\n",
3706 lwpid_of (thread), paddress (pc));
3707
3708 /* We've found an lwp that needs stepping over --- return 1 so
3709 that find_inferior stops looking. */
3710 current_inferior = saved_inferior;
3711
3712 /* If the step over is cancelled, this is set again. */
3713 lwp->need_step_over = 0;
3714 return 1;
3715 }
3716 }
3717
3718 current_inferior = saved_inferior;
3719
3720 if (debug_threads)
3721 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3722 " at 0x%s\n",
3723 lwpid_of (thread), paddress (pc));
3724
3725 return 0;
3726 }
3727
3728 /* Start a step-over operation on LWP.  When LWP stops at a
3729    breakpoint, we need to get the breakpoint out of the way in order
3730    to make progress.  If we let other threads run while we do that,
3731    they may pass by the breakpoint location and miss hitting it.  To
3732    avoid that, a step-over momentarily stops all threads while LWP is
3733    single-stepped with the breakpoint temporarily uninserted from the
3734    inferior.  When the single-step finishes, we reinsert the
3735    breakpoint and let all threads that are supposed to be running run
3736    again.
3737
3738 On targets that don't support hardware single-step, we don't
3739 currently support full software single-stepping. Instead, we only
3740 support stepping over the thread event breakpoint, by asking the
3741 low target where to place a reinsert breakpoint. Since this
3742    routine assumes the breakpoint being stepped over is a thread event
3743    breakpoint, it usually takes the return address of the current
3744    function to be a good enough place to set the reinsert breakpoint.  */
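/* In outline, the flow implemented below and in finish_step_over is
   (a sketch of this file's own calls, not an additional API):

     stop_all_lwps (1, lwp);             - suspend every other thread
     uninsert_breakpoints_at (pc);       - lift the breakpoint at PC
     linux_resume_one_lwp (lwp, step, 0, NULL);
                                         - step (or resume) LWP alone
     ...and on LWP's next stop, finish_step_over reinserts the
     breakpoint and the caller unstops the remaining threads.  */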
3745
3746 static int
3747 start_step_over (struct lwp_info *lwp)
3748 {
3749 struct thread_info *thread = get_lwp_thread (lwp);
3750 struct thread_info *saved_inferior;
3751 CORE_ADDR pc;
3752 int step;
3753
3754 if (debug_threads)
3755 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3756 lwpid_of (thread));
3757
3758 stop_all_lwps (1, lwp);
3759 gdb_assert (lwp->suspended == 0);
3760
3761 if (debug_threads)
3762 debug_printf ("Done stopping all threads for step-over.\n");
3763
3764 /* Note, we should always reach here with an already adjusted PC,
3765 either by GDB (if we're resuming due to GDB's request), or by our
3766 caller, if we just finished handling an internal breakpoint GDB
3767 shouldn't care about. */
3768 pc = get_pc (lwp);
3769
3770 saved_inferior = current_inferior;
3771 current_inferior = thread;
3772
3773 lwp->bp_reinsert = pc;
3774 uninsert_breakpoints_at (pc);
3775 uninsert_fast_tracepoint_jumps_at (pc);
3776
3777 if (can_hardware_single_step ())
3778 {
3779 step = 1;
3780 }
3781 else
3782 {
3783 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3784 set_reinsert_breakpoint (raddr);
3785 step = 0;
3786 }
3787
3788 current_inferior = saved_inferior;
3789
3790 linux_resume_one_lwp (lwp, step, 0, NULL);
3791
3792 /* Require next event from this LWP. */
3793 step_over_bkpt = thread->entry.id;
3794 return 1;
3795 }
3796
3797 /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
3798    start_step_over, if still there, and delete any reinsert
3799    breakpoints we've set, on targets without hardware single-step.  */
3800
3801 static int
3802 finish_step_over (struct lwp_info *lwp)
3803 {
3804 if (lwp->bp_reinsert != 0)
3805 {
3806 if (debug_threads)
3807 debug_printf ("Finished step over.\n");
3808
3809 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3810 may be no breakpoint to reinsert there by now. */
3811 reinsert_breakpoints_at (lwp->bp_reinsert);
3812 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3813
3814 lwp->bp_reinsert = 0;
3815
3816 /* Delete any software-single-step reinsert breakpoints. No
3817 longer needed. We don't have to worry about other threads
3818 hitting this trap, and later not being able to explain it,
3819 because we were stepping over a breakpoint, and we hold all
3820 threads but LWP stopped while doing that. */
3821 if (!can_hardware_single_step ())
3822 delete_reinsert_breakpoints ();
3823
3824 step_over_bkpt = null_ptid;
3825 return 1;
3826 }
3827 else
3828 return 0;
3829 }
3830
3831 /* This function is called once per thread. We check the thread's resume
3832 request, which will tell us whether to resume, step, or leave the thread
3833 stopped; and what signal, if any, it should be sent.
3834
3835 For threads which we aren't explicitly told otherwise, we preserve
3836 the stepping flag; this is used for stepping over gdbserver-placed
3837 breakpoints.
3838
3839 If pending_flags was set in any thread, we queue any needed
3840 signals, since we won't actually resume. We already have a pending
3841 event to report, so we don't need to preserve any step requests;
3842 they should be re-issued if necessary. */
3843
3844 static int
3845 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3846 {
3847 struct thread_info *thread = (struct thread_info *) entry;
3848 struct lwp_info *lwp = get_thread_lwp (thread);
3849 int step;
3850 int leave_all_stopped = * (int *) arg;
3851 int leave_pending;
3852
3853 if (lwp->resume == NULL)
3854 return 0;
3855
3856 if (lwp->resume->kind == resume_stop)
3857 {
3858 if (debug_threads)
3859 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3860
3861 if (!lwp->stopped)
3862 {
3863 if (debug_threads)
3864 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3865
3866 /* Stop the thread, and wait for the event asynchronously,
3867 through the event loop. */
3868 send_sigstop (lwp);
3869 }
3870 else
3871 {
3872 if (debug_threads)
3873 debug_printf ("already stopped LWP %ld\n",
3874 lwpid_of (thread));
3875
3876 /* The LWP may have been stopped in an internal event that
3877 was not meant to be notified back to GDB (e.g., gdbserver
3878 breakpoint), so we should be reporting a stop event in
3879 this case too. */
3880
3881 /* If the thread already has a pending SIGSTOP, this is a
3882 no-op. Otherwise, something later will presumably resume
3883 the thread and this will cause it to cancel any pending
3884 operation, due to last_resume_kind == resume_stop. If
3885 the thread already has a pending status to report, we
3886 will still report it the next time we wait - see
3887 status_pending_p_callback. */
3888
3889 /* If we already have a pending signal to report, then
3890 there's no need to queue a SIGSTOP, as this means we're
3891 midway through moving the LWP out of the jumppad, and we
3892 will report the pending signal as soon as that is
3893 finished. */
3894 if (lwp->pending_signals_to_report == NULL)
3895 send_sigstop (lwp);
3896 }
3897
3898 /* For stop requests, we're done. */
3899 lwp->resume = NULL;
3900 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3901 return 0;
3902 }
3903
3904   /* If this thread, which is about to be resumed, has a pending
3905      status, then don't resume any threads - we can just report the
3906      pending status.  Make sure to queue any signals that would
3907      otherwise be sent.  In all-stop mode, we base this decision on
3908      whether *any* thread has a pending status.  If there's a thread
3909      that needs the step-over-breakpoint dance, then don't resume any
3910      other thread but that particular one.  */
3911 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3912
3913 if (!leave_pending)
3914 {
3915 if (debug_threads)
3916 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3917
3918 step = (lwp->resume->kind == resume_step);
3919 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3920 }
3921 else
3922 {
3923 if (debug_threads)
3924 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3925
3926 /* If we have a new signal, enqueue the signal. */
3927 if (lwp->resume->sig != 0)
3928 {
3929 struct pending_signals *p_sig;
3930 p_sig = xmalloc (sizeof (*p_sig));
3931 p_sig->prev = lwp->pending_signals;
3932 p_sig->signal = lwp->resume->sig;
3933 memset (&p_sig->info, 0, sizeof (siginfo_t));
3934
3935 /* If this is the same signal we were previously stopped by,
3936 make sure to queue its siginfo. We can ignore the return
3937 value of ptrace; if it fails, we'll skip
3938 PTRACE_SETSIGINFO. */
3939 if (WIFSTOPPED (lwp->last_status)
3940 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3941 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3942 &p_sig->info);
3943
3944 lwp->pending_signals = p_sig;
3945 }
3946 }
3947
3948 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3949 lwp->resume = NULL;
3950 return 0;
3951 }
3952
3953 static void
3954 linux_resume (struct thread_resume *resume_info, size_t n)
3955 {
3956 struct thread_resume_array array = { resume_info, n };
3957 struct thread_info *need_step_over = NULL;
3958 int any_pending;
3959 int leave_all_stopped;
3960
3961 if (debug_threads)
3962 {
3963 debug_enter ();
3964 debug_printf ("linux_resume:\n");
3965 }
3966
3967 find_inferior (&all_threads, linux_set_resume_request, &array);
3968
3969 /* If there is a thread which would otherwise be resumed, which has
3970 a pending status, then don't resume any threads - we can just
3971 report the pending status. Make sure to queue any signals that
3972 would otherwise be sent. In non-stop mode, we'll apply this
3973      logic to each thread individually.  We consume all pending events
3974      before considering starting a step-over (in all-stop).  */
3975 any_pending = 0;
3976 if (!non_stop)
3977 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
3978
3979 /* If there is a thread which would otherwise be resumed, which is
3980 stopped at a breakpoint that needs stepping over, then don't
3981 resume any threads - have it step over the breakpoint with all
3982 other threads stopped, then resume all threads again. Make sure
3983 to queue any signals that would otherwise be delivered or
3984 queued. */
3985 if (!any_pending && supports_breakpoints ())
3986 need_step_over
3987 = (struct thread_info *) find_inferior (&all_threads,
3988 need_step_over_p, NULL);
3989
3990 leave_all_stopped = (need_step_over != NULL || any_pending);
3991
3992 if (debug_threads)
3993 {
3994 if (need_step_over != NULL)
3995 debug_printf ("Not resuming all, need step over\n");
3996 else if (any_pending)
3997 debug_printf ("Not resuming, all-stop and found "
3998 "an LWP with pending status\n");
3999 else
4000 debug_printf ("Resuming, no pending status or step over needed\n");
4001 }
4002
4003 /* Even if we're leaving threads stopped, queue all signals we'd
4004 otherwise deliver. */
4005 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4006
4007 if (need_step_over)
4008 start_step_over (get_thread_lwp (need_step_over));
4009
4010 if (debug_threads)
4011 {
4012 debug_printf ("linux_resume done\n");
4013 debug_exit ();
4014 }
4015 }
4016
4017 /* This function is called once per thread. We check the thread's
4018 last resume request, which will tell us whether to resume, step, or
4019 leave the thread stopped. Any signal the client requested to be
4020 delivered has already been enqueued at this point.
4021
4022 If any thread that GDB wants running is stopped at an internal
4023 breakpoint that needs stepping over, we start a step-over operation
4024 on that particular thread, and leave all others stopped. */
4025
4026 static int
4027 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4028 {
4029 struct thread_info *thread = (struct thread_info *) entry;
4030 struct lwp_info *lwp = get_thread_lwp (thread);
4031 int step;
4032
4033 if (lwp == except)
4034 return 0;
4035
4036 if (debug_threads)
4037 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4038
4039 if (!lwp->stopped)
4040 {
4041 if (debug_threads)
4042 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4043 return 0;
4044 }
4045
4046 if (thread->last_resume_kind == resume_stop
4047 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4048 {
4049 if (debug_threads)
4050 	debug_printf ("   client wants LWP %ld to remain stopped\n",
4051 		      lwpid_of (thread));
4052 return 0;
4053 }
4054
4055 if (lwp->status_pending_p)
4056 {
4057 if (debug_threads)
4058 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4059 lwpid_of (thread));
4060 return 0;
4061 }
4062
4063 gdb_assert (lwp->suspended >= 0);
4064
4065 if (lwp->suspended)
4066 {
4067 if (debug_threads)
4068 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4069 return 0;
4070 }
4071
4072 if (thread->last_resume_kind == resume_stop
4073 && lwp->pending_signals_to_report == NULL
4074 && lwp->collecting_fast_tracepoint == 0)
4075 {
4076       /* We haven't reported this LWP as stopped yet (otherwise, the
4077 	 last_status.kind check above would catch it, and we wouldn't
4078 	 reach here).  This LWP may have been momentarily paused by a
4079 	 stop_all_lwps call while handling, for example, another LWP's
4080 step-over. In that case, the pending expected SIGSTOP signal
4081 that was queued at vCont;t handling time will have already
4082 been consumed by wait_for_sigstop, and so we need to requeue
4083 another one here. Note that if the LWP already has a SIGSTOP
4084 pending, this is a no-op. */
4085
4086 if (debug_threads)
4087 debug_printf ("Client wants LWP %ld to stop. "
4088 "Making sure it has a SIGSTOP pending\n",
4089 lwpid_of (thread));
4090
4091 send_sigstop (lwp);
4092 }
4093
4094 step = thread->last_resume_kind == resume_step;
4095 linux_resume_one_lwp (lwp, step, 0, NULL);
4096 return 0;
4097 }
4098
4099 static int
4100 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4101 {
4102 struct thread_info *thread = (struct thread_info *) entry;
4103 struct lwp_info *lwp = get_thread_lwp (thread);
4104
4105 if (lwp == except)
4106 return 0;
4107
4108 lwp->suspended--;
4109 gdb_assert (lwp->suspended >= 0);
4110
4111 return proceed_one_lwp (entry, except);
4112 }
4113
4114 /* When we finish a step-over, set threads running again. If there's
4115 another thread that may need a step-over, now's the time to start
4116 it. Eventually, we'll move all threads past their breakpoints. */
4117
4118 static void
4119 proceed_all_lwps (void)
4120 {
4121 struct thread_info *need_step_over;
4122
4123 /* If there is a thread which would otherwise be resumed, which is
4124 stopped at a breakpoint that needs stepping over, then don't
4125 resume any threads - have it step over the breakpoint with all
4126 other threads stopped, then resume all threads again. */
4127
4128 if (supports_breakpoints ())
4129 {
4130 need_step_over
4131 = (struct thread_info *) find_inferior (&all_threads,
4132 need_step_over_p, NULL);
4133
4134 if (need_step_over != NULL)
4135 {
4136 if (debug_threads)
4137 debug_printf ("proceed_all_lwps: found "
4138 "thread %ld needing a step-over\n",
4139 lwpid_of (need_step_over));
4140
4141 start_step_over (get_thread_lwp (need_step_over));
4142 return;
4143 }
4144 }
4145
4146 if (debug_threads)
4147 debug_printf ("Proceeding, no step-over needed\n");
4148
4149 find_inferior (&all_threads, proceed_one_lwp, NULL);
4150 }
4151
4152 /* Stopped LWPs that the client wanted to be running and that don't
4153    have pending statuses are set to run again, except for EXCEPT, if
4154    not NULL.  This undoes a stop_all_lwps call.  */
4155
4156 static void
4157 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4158 {
4159 if (debug_threads)
4160 {
4161 debug_enter ();
4162 if (except)
4163 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4164 lwpid_of (get_lwp_thread (except)));
4165 else
4166 debug_printf ("unstopping all lwps\n");
4167 }
4168
4169 if (unsuspend)
4170 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4171 else
4172 find_inferior (&all_threads, proceed_one_lwp, except);
4173
4174 if (debug_threads)
4175 {
4176 debug_printf ("unstop_all_lwps done\n");
4177 debug_exit ();
4178 }
4179 }
4180
4181
4182 #ifdef HAVE_LINUX_REGSETS
4183
4184 #define use_linux_regsets 1
4185
4186 /* Returns true if REGSET has been disabled. */
4187
4188 static int
4189 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4190 {
4191 return (info->disabled_regsets != NULL
4192 && info->disabled_regsets[regset - info->regsets]);
4193 }
4194
4195 /* Disable REGSET. */
4196
4197 static void
4198 disable_regset (struct regsets_info *info, struct regset_info *regset)
4199 {
4200 int dr_offset;
4201
4202 dr_offset = regset - info->regsets;
4203 if (info->disabled_regsets == NULL)
4204 info->disabled_regsets = xcalloc (1, info->num_regsets);
4205 info->disabled_regsets[dr_offset] = 1;
4206 }
4207
4208 static int
4209 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4210 struct regcache *regcache)
4211 {
4212 struct regset_info *regset;
4213 int saw_general_regs = 0;
4214 int pid;
4215 struct iovec iov;
4216
4217 regset = regsets_info->regsets;
4218
4219 pid = lwpid_of (current_inferior);
4220 while (regset->size >= 0)
4221 {
4222 void *buf, *data;
4223 int nt_type, res;
4224
4225 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4226 {
4227 regset ++;
4228 continue;
4229 }
4230
4231 buf = xmalloc (regset->size);
4232
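      /* Regsets with a non-zero note type are transferred through the
	 PTRACE_GETREGSET interface, which takes a struct iovec
	 describing the buffer (e.g. NT_PRSTATUS for the general
	 registers); regsets without one use a legacy fixed request
	 such as PTRACE_GETREGS, which takes the buffer pointer
	 directly.  */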
4233 nt_type = regset->nt_type;
4234 if (nt_type)
4235 {
4236 iov.iov_base = buf;
4237 iov.iov_len = regset->size;
4238 data = (void *) &iov;
4239 }
4240 else
4241 data = buf;
4242
4243 #ifndef __sparc__
4244 res = ptrace (regset->get_request, pid,
4245 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4246 #else
4247 res = ptrace (regset->get_request, pid, data, nt_type);
4248 #endif
4249 if (res < 0)
4250 {
4251 if (errno == EIO)
4252 {
4253 /* If we get EIO on a regset, do not try it again for
4254 this process mode. */
4255 disable_regset (regsets_info, regset);
4256 free (buf);
4257 continue;
4258 }
4259 else
4260 {
4261 char s[256];
4262 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4263 pid);
4264 perror (s);
4265 }
4266 }
4267 else if (regset->type == GENERAL_REGS)
4268 saw_general_regs = 1;
4269 regset->store_function (regcache, buf);
4270 regset ++;
4271 free (buf);
4272 }
4273 if (saw_general_regs)
4274 return 0;
4275 else
4276 return 1;
4277 }
4278
4279 static int
4280 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4281 struct regcache *regcache)
4282 {
4283 struct regset_info *regset;
4284 int saw_general_regs = 0;
4285 int pid;
4286 struct iovec iov;
4287
4288 regset = regsets_info->regsets;
4289
4290 pid = lwpid_of (current_inferior);
4291 while (regset->size >= 0)
4292 {
4293 void *buf, *data;
4294 int nt_type, res;
4295
4296 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4297 {
4298 regset ++;
4299 continue;
4300 }
4301
4302 buf = xmalloc (regset->size);
4303
4304 /* First fill the buffer with the current register set contents,
4305 in case there are any items in the kernel's regset that are
4306 not in gdbserver's regcache. */
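      /* For example, on x86 the XSAVE-based NT_X86_XSTATE regset
	 carries kernel-maintained header state alongside the registers
	 gdbserver caches; fetching first and overlaying our values on
	 top preserves what we don't track when we write the set
	 back.  */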
4307
4308 nt_type = regset->nt_type;
4309 if (nt_type)
4310 {
4311 iov.iov_base = buf;
4312 iov.iov_len = regset->size;
4313 data = (void *) &iov;
4314 }
4315 else
4316 data = buf;
4317
4318 #ifndef __sparc__
4319 res = ptrace (regset->get_request, pid,
4320 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4321 #else
4322 res = ptrace (regset->get_request, pid, data, nt_type);
4323 #endif
4324
4325 if (res == 0)
4326 {
4327 /* Then overlay our cached registers on that. */
4328 regset->fill_function (regcache, buf);
4329
4330 /* Only now do we write the register set. */
4331 #ifndef __sparc__
4332 res = ptrace (regset->set_request, pid,
4333 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4334 #else
4335 res = ptrace (regset->set_request, pid, data, nt_type);
4336 #endif
4337 }
4338
4339 if (res < 0)
4340 {
4341 if (errno == EIO)
4342 {
4343 /* If we get EIO on a regset, do not try it again for
4344 this process mode. */
4345 disable_regset (regsets_info, regset);
4346 free (buf);
4347 continue;
4348 }
4349 else if (errno == ESRCH)
4350 {
4351 /* At this point, ESRCH should mean the process is
4352 already gone, in which case we simply ignore attempts
4353 to change its registers. See also the related
4354 comment in linux_resume_one_lwp. */
4355 free (buf);
4356 return 0;
4357 }
4358 else
4359 {
4360 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4361 }
4362 }
4363 else if (regset->type == GENERAL_REGS)
4364 saw_general_regs = 1;
4365 regset ++;
4366 free (buf);
4367 }
4368 if (saw_general_regs)
4369 return 0;
4370 else
4371 return 1;
4372 }
4373
4374 #else /* !HAVE_LINUX_REGSETS */
4375
4376 #define use_linux_regsets 0
4377 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4378 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4379
4380 #endif
4381
4382 /* Return 1 if register REGNO is supported by one of the regset ptrace
4383 calls or 0 if it has to be transferred individually. */
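/* REGS_INFO->regset_bitmap is a bit-array indexed by register number:
   bit (REGNO % 8) of byte (REGNO / 8).  For instance, regno 10 tests
   bit 2 of byte 1, i.e. (regset_bitmap[1] & 0x04).  A NULL bitmap
   means every register is covered by some regset.  */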
4384
4385 static int
4386 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4387 {
4388 unsigned char mask = 1 << (regno % 8);
4389 size_t index = regno / 8;
4390
4391 return (use_linux_regsets
4392 && (regs_info->regset_bitmap == NULL
4393 || (regs_info->regset_bitmap[index] & mask) != 0));
4394 }
4395
4396 #ifdef HAVE_LINUX_USRREGS
4397
4398 int
4399 register_addr (const struct usrregs_info *usrregs, int regnum)
4400 {
4401 int addr;
4402
4403 if (regnum < 0 || regnum >= usrregs->num_regs)
4404 error ("Invalid register number %d.", regnum);
4405
4406 addr = usrregs->regmap[regnum];
4407
4408 return addr;
4409 }
4410
4411 /* Fetch one register. */
4412 static void
4413 fetch_register (const struct usrregs_info *usrregs,
4414 struct regcache *regcache, int regno)
4415 {
4416 CORE_ADDR regaddr;
4417 int i, size;
4418 char *buf;
4419 int pid;
4420
4421 if (regno >= usrregs->num_regs)
4422 return;
4423 if ((*the_low_target.cannot_fetch_register) (regno))
4424 return;
4425
4426 regaddr = register_addr (usrregs, regno);
4427 if (regaddr == -1)
4428 return;
4429
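  /* Round the transfer size up to a whole number of ptrace words;
     e.g. with an 8-byte PTRACE_XFER_TYPE, a 10-byte register is
     transferred as 16 bytes (two PTRACE_PEEKUSER words).  */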
4430 size = ((register_size (regcache->tdesc, regno)
4431 + sizeof (PTRACE_XFER_TYPE) - 1)
4432 & -sizeof (PTRACE_XFER_TYPE));
4433 buf = alloca (size);
4434
4435 pid = lwpid_of (current_inferior);
4436 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4437 {
4438 errno = 0;
4439 *(PTRACE_XFER_TYPE *) (buf + i) =
4440 ptrace (PTRACE_PEEKUSER, pid,
4441 	      /* Coerce to a uintptr_t first to avoid potential gcc warning
4442 		 about coercing an 8 byte integer to a 4 byte pointer.  */
4443 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4444 regaddr += sizeof (PTRACE_XFER_TYPE);
4445 if (errno != 0)
4446 error ("reading register %d: %s", regno, strerror (errno));
4447 }
4448
4449 if (the_low_target.supply_ptrace_register)
4450 the_low_target.supply_ptrace_register (regcache, regno, buf);
4451 else
4452 supply_register (regcache, regno, buf);
4453 }
4454
4455 /* Store one register. */
4456 static void
4457 store_register (const struct usrregs_info *usrregs,
4458 struct regcache *regcache, int regno)
4459 {
4460 CORE_ADDR regaddr;
4461 int i, size;
4462 char *buf;
4463 int pid;
4464
4465 if (regno >= usrregs->num_regs)
4466 return;
4467 if ((*the_low_target.cannot_store_register) (regno))
4468 return;
4469
4470 regaddr = register_addr (usrregs, regno);
4471 if (regaddr == -1)
4472 return;
4473
4474 size = ((register_size (regcache->tdesc, regno)
4475 + sizeof (PTRACE_XFER_TYPE) - 1)
4476 & -sizeof (PTRACE_XFER_TYPE));
4477 buf = alloca (size);
4478 memset (buf, 0, size);
4479
4480 if (the_low_target.collect_ptrace_register)
4481 the_low_target.collect_ptrace_register (regcache, regno, buf);
4482 else
4483 collect_register (regcache, regno, buf);
4484
4485 pid = lwpid_of (current_inferior);
4486 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4487 {
4488 errno = 0;
4489 ptrace (PTRACE_POKEUSER, pid,
4490 /* Coerce to a uintptr_t first to avoid potential gcc warning
4491 about coercing an 8 byte integer to a 4 byte pointer. */
4492 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4493 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4494 if (errno != 0)
4495 {
4496 /* At this point, ESRCH should mean the process is
4497 already gone, in which case we simply ignore attempts
4498 to change its registers. See also the related
4499 comment in linux_resume_one_lwp. */
4500 if (errno == ESRCH)
4501 return;
4502
4503 if ((*the_low_target.cannot_store_register) (regno) == 0)
4504 error ("writing register %d: %s", regno, strerror (errno));
4505 }
4506 regaddr += sizeof (PTRACE_XFER_TYPE);
4507 }
4508 }
4509
4510 /* Fetch all registers, or just one, from the child process.
4511 If REGNO is -1, do this for all registers, skipping any that are
4512 assumed to have been retrieved by regsets_fetch_inferior_registers,
4513 unless ALL is non-zero.
4514 Otherwise, REGNO specifies which register (so we can save time). */
4515 static void
4516 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4517 struct regcache *regcache, int regno, int all)
4518 {
4519 struct usrregs_info *usr = regs_info->usrregs;
4520
4521 if (regno == -1)
4522 {
4523 for (regno = 0; regno < usr->num_regs; regno++)
4524 if (all || !linux_register_in_regsets (regs_info, regno))
4525 fetch_register (usr, regcache, regno);
4526 }
4527 else
4528 fetch_register (usr, regcache, regno);
4529 }
4530
4531 /* Store our register values back into the inferior.
4532 If REGNO is -1, do this for all registers, skipping any that are
4533 assumed to have been saved by regsets_store_inferior_registers,
4534 unless ALL is non-zero.
4535 Otherwise, REGNO specifies which register (so we can save time). */
4536 static void
4537 usr_store_inferior_registers (const struct regs_info *regs_info,
4538 struct regcache *regcache, int regno, int all)
4539 {
4540 struct usrregs_info *usr = regs_info->usrregs;
4541
4542 if (regno == -1)
4543 {
4544 for (regno = 0; regno < usr->num_regs; regno++)
4545 if (all || !linux_register_in_regsets (regs_info, regno))
4546 store_register (usr, regcache, regno);
4547 }
4548 else
4549 store_register (usr, regcache, regno);
4550 }
4551
4552 #else /* !HAVE_LINUX_USRREGS */
4553
4554 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4555 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4556
4557 #endif
4558
4559
4560 void
4561 linux_fetch_registers (struct regcache *regcache, int regno)
4562 {
4563 int use_regsets;
4564 int all = 0;
4565 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4566
4567 if (regno == -1)
4568 {
4569 if (the_low_target.fetch_register != NULL
4570 && regs_info->usrregs != NULL)
4571 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4572 (*the_low_target.fetch_register) (regcache, regno);
4573
4574 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4575 if (regs_info->usrregs != NULL)
4576 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4577 }
4578 else
4579 {
4580 if (the_low_target.fetch_register != NULL
4581 && (*the_low_target.fetch_register) (regcache, regno))
4582 return;
4583
4584 use_regsets = linux_register_in_regsets (regs_info, regno);
4585 if (use_regsets)
4586 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4587 regcache);
4588 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4589 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4590 }
4591 }
4592
4593 void
4594 linux_store_registers (struct regcache *regcache, int regno)
4595 {
4596 int use_regsets;
4597 int all = 0;
4598 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4599
4600 if (regno == -1)
4601 {
4602 all = regsets_store_inferior_registers (regs_info->regsets_info,
4603 regcache);
4604 if (regs_info->usrregs != NULL)
4605 usr_store_inferior_registers (regs_info, regcache, regno, all);
4606 }
4607 else
4608 {
4609 use_regsets = linux_register_in_regsets (regs_info, regno);
4610 if (use_regsets)
4611 all = regsets_store_inferior_registers (regs_info->regsets_info,
4612 regcache);
4613 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4614 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4615 }
4616 }
4617
4618
4619 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4620 to debugger memory starting at MYADDR. */
4621
4622 static int
4623 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4624 {
4625 int pid = lwpid_of (current_inferior);
4626 register PTRACE_XFER_TYPE *buffer;
4627 register CORE_ADDR addr;
4628 register int count;
4629 char filename[64];
4630 register int i;
4631 int ret;
4632 int fd;
4633
4634 /* Try using /proc. Don't bother for one word. */
4635 if (len >= 3 * sizeof (long))
4636 {
4637 int bytes;
4638
4639 /* We could keep this file open and cache it - possibly one per
4640 thread. That requires some juggling, but is even faster. */
4641 sprintf (filename, "/proc/%d/mem", pid);
4642 fd = open (filename, O_RDONLY | O_LARGEFILE);
4643 if (fd == -1)
4644 goto no_proc;
4645
4646 /* If pread64 is available, use it. It's faster if the kernel
4647 supports it (only one syscall), and it's 64-bit safe even on
4648 32-bit platforms (for instance, SPARC debugging a SPARC64
4649 application). */
4650 #ifdef HAVE_PREAD64
4651 bytes = pread64 (fd, myaddr, len, memaddr);
4652 #else
4653 bytes = -1;
4654 if (lseek (fd, memaddr, SEEK_SET) != -1)
4655 bytes = read (fd, myaddr, len);
4656 #endif
4657
4658 close (fd);
4659 if (bytes == len)
4660 return 0;
4661
4662       /* Some data was read; we'll try to get the rest with ptrace.  */
4663 if (bytes > 0)
4664 {
4665 memaddr += bytes;
4666 myaddr += bytes;
4667 len -= bytes;
4668 }
4669 }
4670
4671 no_proc:
4672 /* Round starting address down to longword boundary. */
4673 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4674 /* Round ending address up; get number of longwords that makes. */
4675 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4676 / sizeof (PTRACE_XFER_TYPE));
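  /* For example, with 4-byte words, a 6-byte read at 0x1003 yields
     ADDR = 0x1000 and COUNT = 3, covering 0x1000..0x100b.  */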
4677 /* Allocate buffer of that many longwords. */
4678 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4679
4680 /* Read all the longwords */
4681 errno = 0;
4682 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4683 {
4684 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4685 about coercing an 8 byte integer to a 4 byte pointer. */
4686 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4687 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4688 (PTRACE_TYPE_ARG4) 0);
4689 if (errno)
4690 break;
4691 }
4692 ret = errno;
4693
4694 /* Copy appropriate bytes out of the buffer. */
4695 if (i > 0)
4696 {
4697 i *= sizeof (PTRACE_XFER_TYPE);
4698 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4699 memcpy (myaddr,
4700 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4701 i < len ? i : len);
4702 }
4703
4704 return ret;
4705 }
4706
4707 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4708 memory at MEMADDR. On failure (cannot write to the inferior)
4709 returns the value of errno. Always succeeds if LEN is zero. */
4710
4711 static int
4712 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4713 {
4714 register int i;
4715 /* Round starting address down to longword boundary. */
4716 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4717 /* Round ending address up; get number of longwords that makes. */
4718 register int count
4719 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4720 / sizeof (PTRACE_XFER_TYPE);
4721
4722 /* Allocate buffer of that many longwords. */
4723 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4724 alloca (count * sizeof (PTRACE_XFER_TYPE));
4725
4726 int pid = lwpid_of (current_inferior);
4727
4728 if (len == 0)
4729 {
4730 /* Zero length write always succeeds. */
4731 return 0;
4732 }
4733
4734 if (debug_threads)
4735 {
4736 /* Dump up to four bytes. */
4737 unsigned int val = * (unsigned int *) myaddr;
4738 if (len == 1)
4739 val = val & 0xff;
4740 else if (len == 2)
4741 val = val & 0xffff;
4742 else if (len == 3)
4743 val = val & 0xffffff;
4744 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4745 val, (long)memaddr);
4746 }
4747
4748 /* Fill start and end extra bytes of buffer with existing memory data. */
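  /* E.g. a 2-byte write at 0x1003 with 4-byte words spans the words
     at 0x1000 and 0x1004; peeking both first means the bytes we are
     not writing (0x1000..0x1002 and 0x1005..0x1007) are poked back
     unchanged.  */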
4749
4750 errno = 0;
4751 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4752 about coercing an 8 byte integer to a 4 byte pointer. */
4753 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4754 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4755 (PTRACE_TYPE_ARG4) 0);
4756 if (errno)
4757 return errno;
4758
4759 if (count > 1)
4760 {
4761 errno = 0;
4762 buffer[count - 1]
4763 = ptrace (PTRACE_PEEKTEXT, pid,
4764 /* Coerce to a uintptr_t first to avoid potential gcc warning
4765 about coercing an 8 byte integer to a 4 byte pointer. */
4766 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4767 * sizeof (PTRACE_XFER_TYPE)),
4768 (PTRACE_TYPE_ARG4) 0);
4769 if (errno)
4770 return errno;
4771 }
4772
4773 /* Copy data to be written over corresponding part of buffer. */
4774
4775 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4776 myaddr, len);
4777
4778 /* Write the entire buffer. */
4779
4780 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4781 {
4782 errno = 0;
4783 ptrace (PTRACE_POKETEXT, pid,
4784 /* Coerce to a uintptr_t first to avoid potential gcc warning
4785 about coercing an 8 byte integer to a 4 byte pointer. */
4786 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4787 (PTRACE_TYPE_ARG4) buffer[i]);
4788 if (errno)
4789 return errno;
4790 }
4791
4792 return 0;
4793 }
4794
4795 static void
4796 linux_look_up_symbols (void)
4797 {
4798 #ifdef USE_THREAD_DB
4799 struct process_info *proc = current_process ();
4800
4801 if (proc->private->thread_db != NULL)
4802 return;
4803
4804 /* If the kernel supports tracing clones, then we don't need to
4805 use the magic thread event breakpoint to learn about
4806 threads. */
4807 thread_db_init (!linux_supports_traceclone ());
4808 #endif
4809 }
4810
4811 static void
4812 linux_request_interrupt (void)
4813 {
4814 extern unsigned long signal_pid;
4815
4816 if (!ptid_equal (cont_thread, null_ptid)
4817 && !ptid_equal (cont_thread, minus_one_ptid))
4818 {
4819 int lwpid;
4820
4821 lwpid = lwpid_of (current_inferior);
4822 kill_lwp (lwpid, SIGINT);
4823 }
4824 else
4825 kill_lwp (signal_pid, SIGINT);
4826 }
4827
4828 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4829 to debugger memory starting at MYADDR. */
4830
4831 static int
4832 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4833 {
4834 char filename[PATH_MAX];
4835 int fd, n;
4836 int pid = lwpid_of (current_inferior);
4837
4838 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4839
4840 fd = open (filename, O_RDONLY);
4841 if (fd < 0)
4842 return -1;
4843
4844 if (offset != (CORE_ADDR) 0
4845 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4846 n = -1;
4847 else
4848 n = read (fd, myaddr, len);
4849
4850 close (fd);
4851
4852 return n;
4853 }
4854
4855 /* These breakpoint and watchpoint related wrapper functions simply
4856 pass on the function call if the target has registered a
4857 corresponding function. */
4858
4859 static int
4860 linux_supports_z_point_type (char z_type)
4861 {
4862 return (the_low_target.supports_z_point_type != NULL
4863 && the_low_target.supports_z_point_type (z_type));
4864 }
4865
4866 static int
4867 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4868 int size, struct raw_breakpoint *bp)
4869 {
4870 if (the_low_target.insert_point != NULL)
4871 return the_low_target.insert_point (type, addr, size, bp);
4872 else
4873 /* Unsupported (see target.h). */
4874 return 1;
4875 }
4876
4877 static int
4878 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4879 int size, struct raw_breakpoint *bp)
4880 {
4881 if (the_low_target.remove_point != NULL)
4882 return the_low_target.remove_point (type, addr, size, bp);
4883 else
4884 /* Unsupported (see target.h). */
4885 return 1;
4886 }
4887
4888 static int
4889 linux_stopped_by_watchpoint (void)
4890 {
4891 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4892
4893 return lwp->stopped_by_watchpoint;
4894 }
4895
4896 static CORE_ADDR
4897 linux_stopped_data_address (void)
4898 {
4899 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4900
4901 return lwp->stopped_data_address;
4902 }
4903
4904 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4905 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4906 && defined(PT_TEXT_END_ADDR)
4907
4908 /* This is only used for targets that define PT_TEXT_ADDR,
4909 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4910 the target has different ways of acquiring this information, like
4911 loadmaps. */
4912
4913 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4914 to tell gdb about. */
4915
4916 static int
4917 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4918 {
4919 unsigned long text, text_end, data;
4920 int pid = lwpid_of (get_thread_lwp (current_inferior));
4921
4922 errno = 0;
4923
4924 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4925 (PTRACE_TYPE_ARG4) 0);
4926 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4927 (PTRACE_TYPE_ARG4) 0);
4928 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4929 (PTRACE_TYPE_ARG4) 0);
4930
4931 if (errno == 0)
4932 {
4933 /* Both text and data offsets produced at compile-time (and so
4934 used by gdb) are relative to the beginning of the program,
4935 with the data segment immediately following the text segment.
4936 However, the actual runtime layout in memory may put the data
4937 somewhere else, so when we send gdb a data base-address, we
4938 use the real data base address and subtract the compile-time
4939 data base-address from it (which is just the length of the
4940 text segment). BSS immediately follows data in both
4941 cases. */
4942 *text_p = text;
4943 *data_p = data - (text_end - text);
4944
4945 return 1;
4946 }
4947 return 0;
4948 }
4949 #endif
4950
4951 static int
4952 linux_qxfer_osdata (const char *annex,
4953 unsigned char *readbuf, unsigned const char *writebuf,
4954 CORE_ADDR offset, int len)
4955 {
4956 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4957 }
4958
4959 /* Convert a native/host siginfo object, into/from the siginfo in the
4960 layout of the inferiors' architecture. */
4961
4962 static void
4963 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4964 {
4965 int done = 0;
4966
4967 if (the_low_target.siginfo_fixup != NULL)
4968 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4969
4970 /* If there was no callback, or the callback didn't do anything,
4971 then just do a straight memcpy. */
4972 if (!done)
4973 {
4974 if (direction == 1)
4975 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4976 else
4977 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4978 }
4979 }
4980
4981 static int
4982 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4983 unsigned const char *writebuf, CORE_ADDR offset, int len)
4984 {
4985 int pid;
4986 siginfo_t siginfo;
4987 char inf_siginfo[sizeof (siginfo_t)];
4988
4989 if (current_inferior == NULL)
4990 return -1;
4991
4992 pid = lwpid_of (current_inferior);
4993
4994 if (debug_threads)
4995 debug_printf ("%s siginfo for lwp %d.\n",
4996 readbuf != NULL ? "Reading" : "Writing",
4997 pid);
4998
4999 if (offset >= sizeof (siginfo))
5000 return -1;
5001
5002 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5003 return -1;
5004
5005 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5006 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5007 inferior with a 64-bit GDBSERVER should look the same as debugging it
5008 with a 32-bit GDBSERVER, we need to convert it. */
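  /* The layouts differ because siginfo_t embeds pointer- and
     long-sized fields (e.g. si_addr, si_band), which are 8 bytes on a
     64-bit host but 4 bytes in the 32-bit inferior's view, shifting
     the offsets of every later field.  */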
5009 siginfo_fixup (&siginfo, inf_siginfo, 0);
5010
5011 if (offset + len > sizeof (siginfo))
5012 len = sizeof (siginfo) - offset;
5013
5014 if (readbuf != NULL)
5015 memcpy (readbuf, inf_siginfo + offset, len);
5016 else
5017 {
5018 memcpy (inf_siginfo + offset, writebuf, len);
5019
5020 /* Convert back to ptrace layout before flushing it out. */
5021 siginfo_fixup (&siginfo, inf_siginfo, 1);
5022
5023 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5024 return -1;
5025 }
5026
5027 return len;
5028 }
5029
5030 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5031    it lets us notice when children change state; and it acts as the
5032    handler for the sigsuspend in my_waitpid.  */
5033
5034 static void
5035 sigchld_handler (int signo)
5036 {
5037 int old_errno = errno;
5038
5039 if (debug_threads)
5040 {
5041 do
5042 {
5043 /* fprintf is not async-signal-safe, so call write
5044 directly. */
5045 if (write (2, "sigchld_handler\n",
5046 sizeof ("sigchld_handler\n") - 1) < 0)
5047 break; /* just ignore */
5048 } while (0);
5049 }
5050
5051 if (target_is_async_p ())
5052 async_file_mark (); /* trigger a linux_wait */
5053
5054 errno = old_errno;
5055 }
5056
5057 static int
5058 linux_supports_non_stop (void)
5059 {
5060 return 1;
5061 }
5062
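/* Enable or disable target async (non-stop) mode; returns the
   previous setting.  Readiness is signalled through a self-pipe
   registered with the event loop: sigchld_handler marks the write end
   via async_file_mark, and the event loop's handler for the read end
   ends up triggering a linux_wait.  */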
5063 static int
5064 linux_async (int enable)
5065 {
5066 int previous = (linux_event_pipe[0] != -1);
5067
5068 if (debug_threads)
5069 debug_printf ("linux_async (%d), previous=%d\n",
5070 enable, previous);
5071
5072 if (previous != enable)
5073 {
5074 sigset_t mask;
5075 sigemptyset (&mask);
5076 sigaddset (&mask, SIGCHLD);
5077
5078 sigprocmask (SIG_BLOCK, &mask, NULL);
5079
5080 if (enable)
5081 {
5082 if (pipe (linux_event_pipe) == -1)
5083 fatal ("creating event pipe failed.");
5084
5085 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5086 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5087
5088 /* Register the event loop handler. */
5089 add_file_handler (linux_event_pipe[0],
5090 handle_target_event, NULL);
5091
5092 /* Always trigger a linux_wait. */
5093 async_file_mark ();
5094 }
5095 else
5096 {
5097 delete_file_handler (linux_event_pipe[0]);
5098
5099 close (linux_event_pipe[0]);
5100 close (linux_event_pipe[1]);
5101 linux_event_pipe[0] = -1;
5102 linux_event_pipe[1] = -1;
5103 }
5104
5105 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5106 }
5107
5108 return previous;
5109 }
5110
5111 static int
5112 linux_start_non_stop (int nonstop)
5113 {
5114 /* Register or unregister from event-loop accordingly. */
5115 linux_async (nonstop);
5116 return 0;
5117 }
5118
5119 static int
5120 linux_supports_multi_process (void)
5121 {
5122 return 1;
5123 }
5124
5125 static int
5126 linux_supports_disable_randomization (void)
5127 {
5128 #ifdef HAVE_PERSONALITY
5129 return 1;
5130 #else
5131 return 0;
5132 #endif
5133 }
5134
5135 static int
5136 linux_supports_agent (void)
5137 {
5138 return 1;
5139 }
5140
5141 static int
5142 linux_supports_range_stepping (void)
5143 {
5144 if (*the_low_target.supports_range_stepping == NULL)
5145 return 0;
5146
5147 return (*the_low_target.supports_range_stepping) ();
5148 }
5149
5150 /* Enumerate spufs IDs for process PID. */
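/* Each SPU context the inferior has created shows up as an open file
   descriptor on a spufs directory, so we walk /proc/PID/fd, keep the
   entries that are directories on a filesystem whose f_type is
   SPUFS_MAGIC, and emit the matching descriptor numbers as a packed
   array of 4-byte IDs, honoring the caller's OFFSET/LEN window.  */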
5151 static int
5152 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5153 {
5154 int pos = 0;
5155 int written = 0;
5156 char path[128];
5157 DIR *dir;
5158 struct dirent *entry;
5159
5160 sprintf (path, "/proc/%ld/fd", pid);
5161 dir = opendir (path);
5162 if (!dir)
5163 return -1;
5164
5165 rewinddir (dir);
5166 while ((entry = readdir (dir)) != NULL)
5167 {
5168 struct stat st;
5169 struct statfs stfs;
5170 int fd;
5171
5172 fd = atoi (entry->d_name);
5173 if (!fd)
5174 continue;
5175
5176 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5177 if (stat (path, &st) != 0)
5178 continue;
5179 if (!S_ISDIR (st.st_mode))
5180 continue;
5181
5182 if (statfs (path, &stfs) != 0)
5183 continue;
5184 if (stfs.f_type != SPUFS_MAGIC)
5185 continue;
5186
5187 if (pos >= offset && pos + 4 <= offset + len)
5188 {
5189 *(unsigned int *)(buf + pos - offset) = fd;
5190 written += 4;
5191 }
5192 pos += 4;
5193 }
5194
5195 closedir (dir);
5196 return written;
5197 }
5198
5199 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5200 object type, using the /proc file system. */
5201 static int
5202 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5203 unsigned const char *writebuf,
5204 CORE_ADDR offset, int len)
5205 {
5206 long pid = lwpid_of (current_inferior);
5207 char buf[128];
5208 int fd = 0;
5209 int ret = 0;
5210
5211 if (!writebuf && !readbuf)
5212 return -1;
5213
5214 if (!*annex)
5215 {
5216 if (!readbuf)
5217 return -1;
5218 else
5219 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5220 }
5221
5222 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5223 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5224 if (fd <= 0)
5225 return -1;
5226
5227 if (offset != 0
5228 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5229 {
5230 close (fd);
5231 return 0;
5232 }
5233
5234 if (writebuf)
5235 ret = write (fd, writebuf, (size_t) len);
5236 else
5237 ret = read (fd, readbuf, (size_t) len);
5238
5239 close (fd);
5240 return ret;
5241 }
5242
5243 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5244 struct target_loadseg
5245 {
5246 /* Core address to which the segment is mapped. */
5247 Elf32_Addr addr;
5248 /* VMA recorded in the program header. */
5249 Elf32_Addr p_vaddr;
5250 /* Size of this segment in memory. */
5251 Elf32_Word p_memsz;
5252 };
5253
5254 # if defined PT_GETDSBT
5255 struct target_loadmap
5256 {
5257 /* Protocol version number, must be zero. */
5258 Elf32_Word version;
5259 /* Pointer to the DSBT table, its size, and the DSBT index. */
5260 unsigned *dsbt_table;
5261 unsigned dsbt_size, dsbt_index;
5262 /* Number of segments in this map. */
5263 Elf32_Word nsegs;
5264 /* The actual memory map. */
5265 struct target_loadseg segs[/*nsegs*/];
5266 };
5267 # define LINUX_LOADMAP PT_GETDSBT
5268 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5269 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5270 # else
5271 struct target_loadmap
5272 {
5273 /* Protocol version number, must be zero. */
5274 Elf32_Half version;
5275 /* Number of segments in this map. */
5276 Elf32_Half nsegs;
5277 /* The actual memory map. */
5278 struct target_loadseg segs[/*nsegs*/];
5279 };
5280 # define LINUX_LOADMAP PTRACE_GETFDPIC
5281 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5282 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5283 # endif
5284
5285 static int
5286 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5287 unsigned char *myaddr, unsigned int len)
5288 {
5289 int pid = lwpid_of (current_inferior);
5290 int addr = -1;
5291 struct target_loadmap *data = NULL;
5292 unsigned int actual_length, copy_length;
5293
5294 if (strcmp (annex, "exec") == 0)
5295 addr = (int) LINUX_LOADMAP_EXEC;
5296 else if (strcmp (annex, "interp") == 0)
5297 addr = (int) LINUX_LOADMAP_INTERP;
5298 else
5299 return -1;
5300
5301 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5302 return -1;
5303
5304 if (data == NULL)
5305 return -1;
5306
5307 actual_length = sizeof (struct target_loadmap)
5308 + sizeof (struct target_loadseg) * data->nsegs;
5309
5310 if (offset < 0 || offset > actual_length)
5311 return -1;
5312
5313 copy_length = actual_length - offset < len ? actual_length - offset : len;
5314 memcpy (myaddr, (char *) data + offset, copy_length);
5315 return copy_length;
5316 }
5317 #else
5318 # define linux_read_loadmap NULL
5319 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5320
5321 static void
5322 linux_process_qsupported (const char *query)
5323 {
5324 if (the_low_target.process_qsupported != NULL)
5325 the_low_target.process_qsupported (query);
5326 }
5327
5328 static int
5329 linux_supports_tracepoints (void)
5330 {
5331 if (*the_low_target.supports_tracepoints == NULL)
5332 return 0;
5333
5334 return (*the_low_target.supports_tracepoints) ();
5335 }
5336
5337 static CORE_ADDR
5338 linux_read_pc (struct regcache *regcache)
5339 {
5340 if (the_low_target.get_pc == NULL)
5341 return 0;
5342
5343 return (*the_low_target.get_pc) (regcache);
5344 }
5345
5346 static void
5347 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5348 {
5349 gdb_assert (the_low_target.set_pc != NULL);
5350
5351 (*the_low_target.set_pc) (regcache, pc);
5352 }
5353
5354 static int
5355 linux_thread_stopped (struct thread_info *thread)
5356 {
5357 return get_thread_lwp (thread)->stopped;
5358 }
5359
5360 /* This exposes stop-all-threads functionality to other modules. */
5361
5362 static void
5363 linux_pause_all (int freeze)
5364 {
5365 stop_all_lwps (freeze, NULL);
5366 }
5367
5368 /* This exposes unstop-all-threads functionality to other gdbserver
5369 modules. */
5370
5371 static void
5372 linux_unpause_all (int unfreeze)
5373 {
5374 unstop_all_lwps (unfreeze, NULL);
5375 }
5376
5377 static int
5378 linux_prepare_to_access_memory (void)
5379 {
5380   /* Neither ptrace nor /proc/PID/mem allows accessing memory through
5381      a running LWP.  */
5382 if (non_stop)
5383 linux_pause_all (1);
5384 return 0;
5385 }
5386
5387 static void
5388 linux_done_accessing_memory (void)
5389 {
5390   /* Neither ptrace nor /proc/PID/mem allows accessing memory through
5391      a running LWP.  */
5392 if (non_stop)
5393 linux_unpause_all (1);
5394 }
5395
5396 static int
5397 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5398 CORE_ADDR collector,
5399 CORE_ADDR lockaddr,
5400 ULONGEST orig_size,
5401 CORE_ADDR *jump_entry,
5402 CORE_ADDR *trampoline,
5403 ULONGEST *trampoline_size,
5404 unsigned char *jjump_pad_insn,
5405 ULONGEST *jjump_pad_insn_size,
5406 CORE_ADDR *adjusted_insn_addr,
5407 CORE_ADDR *adjusted_insn_addr_end,
5408 char *err)
5409 {
5410 return (*the_low_target.install_fast_tracepoint_jump_pad)
5411 (tpoint, tpaddr, collector, lockaddr, orig_size,
5412 jump_entry, trampoline, trampoline_size,
5413 jjump_pad_insn, jjump_pad_insn_size,
5414 adjusted_insn_addr, adjusted_insn_addr_end,
5415 err);
5416 }
5417
5418 static struct emit_ops *
5419 linux_emit_ops (void)
5420 {
5421 if (the_low_target.emit_ops != NULL)
5422 return (*the_low_target.emit_ops) ();
5423 else
5424 return NULL;
5425 }
5426
5427 static int
5428 linux_get_min_fast_tracepoint_insn_len (void)
5429 {
5430 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5431 }
5432
5433 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
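/* The auxiliary vector read from /proc/PID/auxv is an array of
   { a_type, a_un.a_val } pairs in the inferior's word size,
   terminated by an AT_NULL entry; we scan it for AT_PHDR (the address
   of the program header table) and AT_PHNUM (the number of entries in
   it).  */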
5434
5435 static int
5436 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5437 CORE_ADDR *phdr_memaddr, int *num_phdr)
5438 {
5439 char filename[PATH_MAX];
5440 int fd;
5441 const int auxv_size = is_elf64
5442 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5443 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5444
5445 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5446
5447 fd = open (filename, O_RDONLY);
5448 if (fd < 0)
5449 return 1;
5450
5451 *phdr_memaddr = 0;
5452 *num_phdr = 0;
5453 while (read (fd, buf, auxv_size) == auxv_size
5454 && (*phdr_memaddr == 0 || *num_phdr == 0))
5455 {
5456 if (is_elf64)
5457 {
5458 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5459
5460 switch (aux->a_type)
5461 {
5462 case AT_PHDR:
5463 *phdr_memaddr = aux->a_un.a_val;
5464 break;
5465 case AT_PHNUM:
5466 *num_phdr = aux->a_un.a_val;
5467 break;
5468 }
5469 }
5470 else
5471 {
5472 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5473
5474 switch (aux->a_type)
5475 {
5476 case AT_PHDR:
5477 *phdr_memaddr = aux->a_un.a_val;
5478 break;
5479 case AT_PHNUM:
5480 *num_phdr = aux->a_un.a_val;
5481 break;
5482 }
5483 }
5484 }
5485
5486 close (fd);
5487
5488 if (*phdr_memaddr == 0 || *num_phdr == 0)
5489 {
5490 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5491 "phdr_memaddr = %ld, phdr_num = %d",
5492 (long) *phdr_memaddr, *num_phdr);
5493 return 2;
5494 }
5495
5496 return 0;
5497 }
5498
5499 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5500
5501 static CORE_ADDR
5502 get_dynamic (const int pid, const int is_elf64)
5503 {
5504 CORE_ADDR phdr_memaddr, relocation;
5505 int num_phdr, i;
5506 unsigned char *phdr_buf;
5507 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5508
5509 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5510 return 0;
5511
5512 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5513 phdr_buf = alloca (num_phdr * phdr_size);
5514
5515 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5516 return 0;
5517
5518 /* Compute relocation: it is expected to be 0 for "regular" executables,
5519 non-zero for PIE ones. */
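  /* PT_PHDR records the link-time virtual address of the program
     header table, so the difference between where the table actually
     is (PHDR_MEMADDR, from AT_PHDR) and p_vaddr is the load bias.
     E.g. a PIE with PT_PHDR p_vaddr 0x40 whose headers sit at
     0x555555554040 at run time has relocation 0x555555554000.  */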
5520 relocation = -1;
5521 for (i = 0; relocation == -1 && i < num_phdr; i++)
5522 if (is_elf64)
5523 {
5524 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5525
5526 if (p->p_type == PT_PHDR)
5527 relocation = phdr_memaddr - p->p_vaddr;
5528 }
5529 else
5530 {
5531 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5532
5533 if (p->p_type == PT_PHDR)
5534 relocation = phdr_memaddr - p->p_vaddr;
5535 }
5536
5537 if (relocation == -1)
5538 {
5539       /* PT_PHDR is optional, but necessary for PIE in general.
5540 	 Fortunately, real-world executables, including PIE executables,
5541 	 always have PT_PHDR present.  PT_PHDR is absent from some shared
5542 	 libraries and from fpc (Free Pascal 2.4) binaries, but neither of
5543 	 those needs or provides DT_DEBUG anyway (fpc is statically linked).
5544 
5545 	 Therefore, if DT_DEBUG exists, PT_PHDR is always present too.
5546
5547 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5548
5549 return 0;
5550 }
5551
5552 for (i = 0; i < num_phdr; i++)
5553 {
5554 if (is_elf64)
5555 {
5556 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5557
5558 if (p->p_type == PT_DYNAMIC)
5559 return p->p_vaddr + relocation;
5560 }
5561 else
5562 {
5563 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5564
5565 if (p->p_type == PT_DYNAMIC)
5566 return p->p_vaddr + relocation;
5567 }
5568 }
5569
5570 return 0;
5571 }
5572
5573 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5574 can be 0 if the inferior does not yet have the library list initialized.
5575 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5576 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5577
5578 static CORE_ADDR
5579 get_r_debug (const int pid, const int is_elf64)
5580 {
5581 CORE_ADDR dynamic_memaddr;
5582 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5583 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5584 CORE_ADDR map = -1;
5585
5586 dynamic_memaddr = get_dynamic (pid, is_elf64);
5587 if (dynamic_memaddr == 0)
5588 return map;
5589
5590 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5591 {
5592 if (is_elf64)
5593 {
5594 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5595 #ifdef DT_MIPS_RLD_MAP
5596 union
5597 {
5598 Elf64_Xword map;
5599 unsigned char buf[sizeof (Elf64_Xword)];
5600 }
5601 rld_map;
5602
5603 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5604 {
5605 if (linux_read_memory (dyn->d_un.d_val,
5606 rld_map.buf, sizeof (rld_map.buf)) == 0)
5607 return rld_map.map;
5608 else
5609 break;
5610 }
5611 #endif /* DT_MIPS_RLD_MAP */
5612
5613 if (dyn->d_tag == DT_DEBUG && map == -1)
5614 map = dyn->d_un.d_val;
5615
5616 if (dyn->d_tag == DT_NULL)
5617 break;
5618 }
5619 else
5620 {
5621 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5622 #ifdef DT_MIPS_RLD_MAP
5623 union
5624 {
5625 Elf32_Word map;
5626 unsigned char buf[sizeof (Elf32_Word)];
5627 }
5628 rld_map;
5629
5630 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5631 {
5632 if (linux_read_memory (dyn->d_un.d_val,
5633 rld_map.buf, sizeof (rld_map.buf)) == 0)
5634 return rld_map.map;
5635 else
5636 break;
5637 }
5638 #endif /* DT_MIPS_RLD_MAP */
5639
5640 if (dyn->d_tag == DT_DEBUG && map == -1)
5641 map = dyn->d_un.d_val;
5642
5643 if (dyn->d_tag == DT_NULL)
5644 break;
5645 }
5646
5647 dynamic_memaddr += dyn_size;
5648 }
5649
5650 return map;
5651 }
5652
5653 /* Read one pointer from MEMADDR in the inferior. */
5654
5655 static int
5656 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5657 {
5658 int ret;
5659
5660 /* Go through a union so this works on either big or little endian
5661 hosts, when the inferior's pointer size is smaller than the size
5662      of CORE_ADDR.  It is assumed the inferior's endianness is the
5663      same as the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_inferior);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

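  /* The annex is a semicolon-separated list of NAME=HEX-VALUE pairs;
     "start" gives the link_map address to resume the walk from, and
     "prev" the expected value of its l_prev back-pointer.  */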
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && strncmp (annex, "start", 5) == 0)
	addrp = &lm_addr;
      else if (len == 4 && strncmp (annex, "prev", 4) == 0)
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

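  /* If the annex did not supply a starting point, find the head of
     the link_map list via the inferior's r_debug structure.  */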
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  This situation will not change
	 for this inferior, so do not retry it.  Report it to GDB as
	 E01; see GDB's solib-svr4.c for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

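  /* Emit the XML document into a growing buffer.  The opening tag is
     left unterminated until the first library element so that an
     empty list can be closed as a self-closing tag.  */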
  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

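  /* Walk the link_map chain, reading the interesting fields of each
     entry; stop at the first entry that cannot be read or whose
     back-pointer does not match the expected previous entry.  */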
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has a valid name, as the
	 first entry corresponds to the main executable.  The first
	 entry should not be skipped if the dynamic loader was loaded
	 late by a static executable (see the solib-svr4.c parameter
	 ignore_first), but in that case the main executable has no
	 PT_DYNAMIC present and this function has already exited above
	 due to a failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

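  /* Return only the slice of the document selected by OFFSET and LEN,
     as the qXfer packet requests.  */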
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}

#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
		       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
#endif /* HAVE_LINUX_BTRACE */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
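  /* Count the entries in the regsets table; the table is terminated
     by an entry with a negative size.  */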
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

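  /* Install the SIGCHLD handler so we are notified when a child
     changes state; SA_RESTART keeps interrupted system calls from
     failing with EINTR.  */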
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}