/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <stdio.h>
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
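
/* For example, with SIGTRAP == 5, W_STOPCODE (SIGTRAP) is
   (5 << 8) | 0x7f == 0x57f: WIFSTOPPED is true of such a status, and
   WSTOPSIG extracts the 5 back, mirroring how the kernel encodes a
   signal stop in a waitpid status.  */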

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
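
/* The stopped_pids list above is filled in by linux_low_filter_event
   when a stop arrives from an lwp we do not know about yet (presumably
   a freshly cloned child), and drained by handle_extended_wait once
   the parent's clone event identifies the new lwp.  */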

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER identifies a 64-bit ELF file, 0 if it identifies
   a 32-bit ELF file, and -1 if HEADER is not an ELF file at all.
   Store the file's machine in *MACHINE (EM_NONE if not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible, doesn't exist, or is not an
   ELF file.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

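/* Delete LWP: remove its thread from the thread list and free the
   lwp's storage, including its architecture-specific private data.  */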
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */
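
/* With clone event reporting enabled (PTRACE_O_TRACECLONE, set via
   linux_enable_event_reporting), the kernel reports a clone as a
   SIGTRAP stop whose extended event code lives in bits 16 and up of
   the wait status, which is why the code below compares WSTAT >> 16
   against PTRACE_EVENT_CLONE.  */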

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

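/* For example, on i386 (decr_pc_after_break == 1), if a one-byte int3
   breakpoint is planted at 0x8048500 and the inferior is continued,
   the SIGTRAP stop reports $eip == 0x8048501; get_stop_pc below then
   yields 0x8048500, the breakpoint address itself.  (The addresses
   here are only illustrative.)  */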
static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);

  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
				strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
				strerror (err), err);
  xfree (warnings);
  return reason_string;
}

/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  int new_threads_found;
	  int iterations = 0;

	  while (iterations < 2)
	    {
	      struct dirent *dp;

	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  unsigned long lwp;
		  ptid_t ptid;

		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  ptid = ptid_build (pid, lwp, 0);

		  /* Is this a new thread?  */
		  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
		    {
		      int err;

		      if (debug_threads)
			debug_printf ("Found new lwp %ld\n", lwp);

		      err = linux_attach_lwp (ptid);
		      if (err != 0)
			warning ("Cannot attach to lwp %ld: %s",
				 lwp,
				 linux_attach_fail_reason_string (ptid, err));

		      new_threads_found++;
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    debug_printf ("LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
		  target_pid_to_str (ptid_of (thr)),
		  errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		  target_pid_to_str (ptid_of (thr)),
		  errno ? strerror (errno) : "OK");
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

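/* Kill process PID and all of its LWPs, and reap their exit
   statuses.  The thread group leader is killed last; see the comment
   in kill_one_lwp_callback.  */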
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in kill_one_lwp_callback.  We did not kill the
     first thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it would be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

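/* Callback for `find_inferior'.  Detach from one LWP of the process
   whose pid ARGS points to: discard any pending SIGSTOP, flush the
   thread's registers, and detach with PTRACE_DETACH, passing on any
   pending signal.  */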
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

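/* Detach from process PID: stop and stabilize all of its LWPs, then
   detach from each of them.  */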
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

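/* Forget everything we know about process PROCESS: remove its LWPs
   from the thread list, free its private data, and remove it from the
   process list.  */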
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

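/* Wait until process PID has been reaped: loop until waitpid reports
   an exit or a termination by signal, or fails with ECHILD.  */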
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

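/* Callback for `find_inferior'.  DATA points to a ptid.  Return 1 if
   ENTRY's lwp id matches the ptid's lwp, or its pid when the ptid
   carries no lwp.  */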
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_inferior));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_inferior));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_inferior),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_inferior, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_inferior));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice.)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	debug_printf ("CB: Push back breakpoint for %s\n",
		      target_pid_to_str (ptid_of (current_inferior)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	debug_printf ("CB: No breakpoint found at %s for [%s]\n",
		      paddress (lwp->stop_pc),
		      target_pid_to_str (ptid_of (current_inferior)));
    }

  current_inferior = saved_inferior;
  return 0;
}
1737
1738 /* Do low-level handling of the event, and check if we should go on
1739 and pass it to caller code. Return the affected lwp if we are, or
1740 NULL otherwise. */
1741
1742 static struct lwp_info *
1743 linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1744 {
1745 struct lwp_info *child;
1746 struct thread_info *thread;
1747
1748 child = find_lwp_pid (pid_to_ptid (lwpid));
1749
1750 /* If we didn't find a process, one of two things presumably happened:
1751 - A process we started and then detached from has exited. Ignore it.
1752 - A process we are controlling has forked and the new child's stop
1753 was reported to us by the kernel. Save its PID. */
1754 if (child == NULL && WIFSTOPPED (wstat))
1755 {
1756 add_to_pid_list (&stopped_pids, lwpid, wstat);
1757 return NULL;
1758 }
1759 else if (child == NULL)
1760 return NULL;
1761
1762 thread = get_lwp_thread (child);
1763
1764 child->stopped = 1;
1765
1766 child->last_status = wstat;
1767
1768 if (WIFSTOPPED (wstat))
1769 {
1770 struct process_info *proc;
1771
1772 /* Architecture-specific setup after inferior is running. This
1773 needs to happen after we have attached to the inferior and it
1774 is stopped for the first time, but before we access any
1775 inferior registers. */
1776 proc = find_process_pid (pid_of (thread));
1777 if (proc->private->new_inferior)
1778 {
1779 struct thread_info *saved_inferior;
1780
1781 saved_inferior = current_inferior;
1782 current_inferior = thread;
1783
1784 the_low_target.arch_setup ();
1785
1786 current_inferior = saved_inferior;
1787
1788 proc->private->new_inferior = 0;
1789 }
1790 }
1791
1792 /* Store the STOP_PC, with adjustment applied. This depends on the
1793 architecture being defined already (so that CHILD has a valid
1794 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1795 not). */
1796 if (WIFSTOPPED (wstat))
1797 {
1798 if (debug_threads
1799 && the_low_target.get_pc != NULL)
1800 {
1801 struct thread_info *saved_inferior;
1802 struct regcache *regcache;
1803 CORE_ADDR pc;
1804
1805 saved_inferior = current_inferior;
1806 current_inferior = thread;
1807 regcache = get_thread_regcache (current_inferior, 1);
1808 pc = (*the_low_target.get_pc) (regcache);
1809 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
1810 current_inferior = saved_inferior;
1811 }
1812
1813 child->stop_pc = get_stop_pc (child);
1814 }
1815
1816 /* Fetch the possibly triggered data watchpoint info and store it in
1817 CHILD.
1818
1819 On some archs, like x86, that use debug registers to set
1820 watchpoints, it's possible that the way to know which watched
1821 address trapped, is to check the register that is used to select
1822 which address to watch. Problem is, between setting the
1823 watchpoint and reading back which data address trapped, the user
1824 may change the set of watchpoints, and, as a consequence, GDB
1825 changes the debug registers in the inferior. To avoid reading
1826 back a stale stopped-data-address when that happens, we cache in
1827 LP the fact that a watchpoint trapped, and the corresponding data
1828 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1829 changes the debug registers meanwhile, we have the cached data we
1830 can rely on. */
1831
1832 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1833 {
1834 if (the_low_target.stopped_by_watchpoint == NULL)
1835 {
1836 child->stopped_by_watchpoint = 0;
1837 }
1838 else
1839 {
1840 struct thread_info *saved_inferior;
1841
1842 saved_inferior = current_inferior;
1843 current_inferior = thread;
1844
1845 child->stopped_by_watchpoint
1846 = the_low_target.stopped_by_watchpoint ();
1847
1848 if (child->stopped_by_watchpoint)
1849 {
1850 if (the_low_target.stopped_data_address != NULL)
1851 child->stopped_data_address
1852 = the_low_target.stopped_data_address ();
1853 else
1854 child->stopped_data_address = 0;
1855 }
1856
1857 current_inferior = saved_inferior;
1858 }
1859 }
1860
1861 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1862 {
1863 linux_enable_event_reporting (lwpid);
1864 child->must_set_ptrace_flags = 0;
1865 }
1866
1867 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1868 && wstat >> 16 != 0)
1869 {
1870 handle_extended_wait (child, wstat);
1871 return NULL;
1872 }
1873
1874 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1875 && child->stop_expected)
1876 {
1877 if (debug_threads)
1878 debug_printf ("Expected stop.\n");
1879 child->stop_expected = 0;
1880
1881 if (thread->last_resume_kind == resume_stop)
1882 {
1883 /* We want to report the stop to the core. Treat the
1884 SIGSTOP as a normal event. */
1885 }
1886 else if (stopping_threads != NOT_STOPPING_THREADS)
1887 {
1888 /* Stopping threads. We don't want this SIGSTOP to end up
1889 pending in the FILTER_PTID handling below. */
1890 return NULL;
1891 }
1892 else
1893 {
1894 /* Filter out the event. */
1895 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1896 return NULL;
1897 }
1898 }
1899
1900 /* Check if the thread has exited. */
1901 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1902 && num_lwps (pid_of (thread)) > 1)
1903 {
1904 if (debug_threads)
1905 debug_printf ("LLW: %d exited.\n", lwpid);
1906
1907 /* If there is at least one more LWP, then the exit signal
1908 was not the end of the debugged application and should be
1909 ignored. */
1910 delete_lwp (child);
1911 return NULL;
1912 }
1913
1914 if (!ptid_match (ptid_of (thread), filter_ptid))
1915 {
1916 if (debug_threads)
1917 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1918 lwpid, wstat);
1919
1920 if (WIFSTOPPED (wstat))
1921 {
1922 child->status_pending_p = 1;
1923 child->status_pending = wstat;
1924
1925 if (WSTOPSIG (wstat) != SIGSTOP)
1926 {
1927 /* Cancel breakpoint hits. The breakpoint may be
1928 removed before we fetch events from this process to
1929 report to the core. It is best not to assume the
1930 moribund breakpoints heuristic always handles these
1931 		 cases --- too many events could go through to
1932 		 the core before this one is handled.  All-stop always
1933 cancels breakpoint hits in all threads. */
1934 if (non_stop
1935 && WSTOPSIG (wstat) == SIGTRAP
1936 && cancel_breakpoint (child))
1937 {
1938 /* Throw away the SIGTRAP. */
1939 child->status_pending_p = 0;
1940
1941 if (debug_threads)
1942 debug_printf ("LLW: LWP %d hit a breakpoint while"
1943 " waiting for another process;"
1944 " cancelled it\n", lwpid);
1945 }
1946 }
1947 }
1948 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1949 {
1950 if (debug_threads)
1951 debug_printf ("LLWE: process %d exited while fetching "
1952 "event from another LWP\n", lwpid);
1953
1954 	      /* This was the last lwp in the process.  Events are
1955 		 serialized to the GDB core, and we can't report this one
1956 		 right now; since the core and the other target layers
1957 		 will want to be notified about the exit code/signal,
1958 		 leave the status pending for the next time we're able
1959 		 to report it.  */
1960 mark_lwp_dead (child, wstat);
1961 }
1962
1963 return NULL;
1964 }
1965
1966 return child;
1967 }
1968
1969 /* When the event-loop is doing a step-over, this points at the thread
1970 being stepped. */
1971 ptid_t step_over_bkpt;
1972
1973 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1974 match FILTER_PTID (leaving others pending). The PTIDs can be:
1975 minus_one_ptid, to specify any child; a pid PTID, specifying all
1976 lwps of a thread group; or a PTID representing a single lwp. Store
1977 the stop status through the status pointer WSTAT. OPTIONS is
1978 passed to the waitpid call. Return 0 if no event was found and
1979 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1980    were found.  Return the PID of the stopped child otherwise.  */
1981
1982 static int
1983 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1984 int *wstatp, int options)
1985 {
1986 struct thread_info *event_thread;
1987 struct lwp_info *event_child, *requested_child;
1988 sigset_t block_mask, prev_mask;
1989
1990 retry:
1991 /* N.B. event_thread points to the thread_info struct that contains
1992 event_child. Keep them in sync. */
1993 event_thread = NULL;
1994 event_child = NULL;
1995 requested_child = NULL;
1996
1997 /* Check for a lwp with a pending status. */
1998
1999 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2000 {
2001 event_thread = (struct thread_info *)
2002 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2003 if (event_thread != NULL)
2004 event_child = get_thread_lwp (event_thread);
2005 if (debug_threads && event_thread)
2006 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2007 }
2008 else if (!ptid_equal (filter_ptid, null_ptid))
2009 {
2010 requested_child = find_lwp_pid (filter_ptid);
2011
2012 if (stopping_threads == NOT_STOPPING_THREADS
2013 && requested_child->status_pending_p
2014 && requested_child->collecting_fast_tracepoint)
2015 {
2016 enqueue_one_deferred_signal (requested_child,
2017 &requested_child->status_pending);
2018 requested_child->status_pending_p = 0;
2019 requested_child->status_pending = 0;
2020 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2021 }
2022
2023 if (requested_child->suspended
2024 && requested_child->status_pending_p)
2025 fatal ("requesting an event out of a suspended child?");
2026
2027 if (requested_child->status_pending_p)
2028 {
2029 event_child = requested_child;
2030 event_thread = get_lwp_thread (event_child);
2031 }
2032 }
2033
2034 if (event_child != NULL)
2035 {
2036 if (debug_threads)
2037 debug_printf ("Got an event from pending child %ld (%04x)\n",
2038 lwpid_of (event_thread), event_child->status_pending);
2039 *wstatp = event_child->status_pending;
2040 event_child->status_pending_p = 0;
2041 event_child->status_pending = 0;
2042 current_inferior = event_thread;
2043 return lwpid_of (event_thread);
2044 }
2045
2046 /* But if we don't find a pending event, we'll have to wait.
2047
2048 We only enter this loop if no process has a pending wait status.
2049 Thus any action taken in response to a wait status inside this
2050 loop is responding as soon as we detect the status, not after any
2051 pending events. */
2052
2053 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2054 all signals while here. */
2055 sigfillset (&block_mask);
2056 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
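       /* Blocking SIGCHLD first closes the classic race: a child
          could change state after the WNOHANG waitpid below but
          before a blocking wait, and the wakeup would be lost.  With
          SIGCHLD blocked, the notification stays pending until
          sigsuspend atomically installs PREV_MASK and waits.  */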
2057
2058 while (event_child == NULL)
2059 {
2060 pid_t ret = 0;
2061
2062       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2063 quirks:
2064
2065 - If the thread group leader exits while other threads in the
2066 thread group still exist, waitpid(TGID, ...) hangs. That
2067 waitpid won't return an exit status until the other threads
2068 in the group are reaped.
2069
2070 - When a non-leader thread execs, that thread just vanishes
2071 without reporting an exit (so we'd hang if we waited for it
2072 explicitly in that case). The exec event is reported to
2073 the TGID pid (although we don't currently enable exec
2074 events). */
2075 errno = 0;
2076 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2077
2078 if (debug_threads)
2079 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2080 ret, errno ? strerror (errno) : "ERRNO-OK");
2081
2082 if (ret > 0)
2083 {
2084 if (debug_threads)
2085 {
2086 debug_printf ("LLW: waitpid %ld received %s\n",
2087 (long) ret, status_to_str (*wstatp));
2088 }
2089
2090 event_child = linux_low_filter_event (filter_ptid,
2091 ret, *wstatp);
2092 if (event_child != NULL)
2093 {
2094 /* We got an event to report to the core. */
2095 event_thread = get_lwp_thread (event_child);
2096 break;
2097 }
2098
2099 /* Retry until nothing comes out of waitpid. A single
2100 SIGCHLD can indicate more than one child stopped. */
2101 continue;
2102 }
2103
2104 /* Check for zombie thread group leaders. Those can't be reaped
2105 until all other threads in the thread group are. */
2106 check_zombie_leaders ();
2107
2108 /* If there are no resumed children left in the set of LWPs we
2109 want to wait for, bail. We can't just block in
2110 waitpid/sigsuspend, because lwps might have been left stopped
2111 in trace-stop state, and we'd be stuck forever waiting for
2112 their status to change (which would only happen if we resumed
2113 them). Even if WNOHANG is set, this return code is preferred
2114 over 0 (below), as it is more detailed. */
2115 if ((find_inferior (&all_threads,
2116 not_stopped_callback,
2117 &wait_ptid) == NULL))
2118 {
2119 if (debug_threads)
2120 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2121 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2122 return -1;
2123 }
2124
2125 /* No interesting event to report to the caller. */
2126 if ((options & WNOHANG))
2127 {
2128 if (debug_threads)
2129 debug_printf ("WNOHANG set, no event found\n");
2130
2131 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2132 return 0;
2133 }
2134
2135 /* Block until we get an event reported with SIGCHLD. */
2136 if (debug_threads)
2137 debug_printf ("sigsuspend'ing\n");
2138
2139 sigsuspend (&prev_mask);
2140 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2141 goto retry;
2142 }
2143
2144 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2145
2146 current_inferior = event_thread;
2147
2148 /* Check for thread exit. */
2149 if (! WIFSTOPPED (*wstatp))
2150 {
2151 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2152
2153 if (debug_threads)
2154 	debug_printf ("LWP %ld is the last lwp of process. "
2155 		      "Process %d exiting.\n",
2156 		      lwpid_of (event_thread), pid_of (event_thread));
2157 return lwpid_of (event_thread);
2158 }
2159
2160 return lwpid_of (event_thread);
2161 }
2162
2163 /* Wait for an event from child(ren) PTID. PTIDs can be:
2164 minus_one_ptid, to specify any child; a pid PTID, specifying all
2165 lwps of a thread group; or a PTID representing a single lwp. Store
2166 the stop status through the status pointer WSTAT. OPTIONS is
2167 passed to the waitpid call. Return 0 if no event was found and
2168 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2169    were found.  Return the PID of the stopped child otherwise.  */
2170
2171 static int
2172 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2173 {
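       /* Passing PTID as both the wait set and the filter means an
          event from any matching LWP is reported immediately rather
          than left pending.  */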
2174 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2175 }
2176
2177 /* Count the LWPs that have had events.  */
2178
2179 static int
2180 count_events_callback (struct inferior_list_entry *entry, void *data)
2181 {
2182 struct thread_info *thread = (struct thread_info *) entry;
2183 struct lwp_info *lp = get_thread_lwp (thread);
2184 int *count = data;
2185
2186 gdb_assert (count != NULL);
2187
2188 /* Count only resumed LWPs that have a SIGTRAP event pending that
2189 should be reported to GDB. */
2190 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2191 && thread->last_resume_kind != resume_stop
2192 && lp->status_pending_p
2193 && WIFSTOPPED (lp->status_pending)
2194 && WSTOPSIG (lp->status_pending) == SIGTRAP
2195 && !breakpoint_inserted_here (lp->stop_pc))
2196 (*count)++;
2197
2198 return 0;
2199 }
2200
2201 /* Select the LWP (if any) that is currently being single-stepped. */
2202
2203 static int
2204 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2205 {
2206 struct thread_info *thread = (struct thread_info *) entry;
2207 struct lwp_info *lp = get_thread_lwp (thread);
2208
2209 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2210 && thread->last_resume_kind == resume_step
2211 && lp->status_pending_p)
2212 return 1;
2213 else
2214 return 0;
2215 }
2216
2217 /* Select the Nth LWP that has had a SIGTRAP event that should be
2218 reported to GDB. */
2219
2220 static int
2221 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2222 {
2223 struct thread_info *thread = (struct thread_info *) entry;
2224 struct lwp_info *lp = get_thread_lwp (thread);
2225 int *selector = data;
2226
2227 gdb_assert (selector != NULL);
2228
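       /* SELECTOR counts down across the qualifying LWPs: each match
          post-decrements it, and the LWP that sees it at zero is the
          randomly chosen Nth one.  */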
2229 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2230 if (thread->last_resume_kind != resume_stop
2231 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2232 && lp->status_pending_p
2233 && WIFSTOPPED (lp->status_pending)
2234 && WSTOPSIG (lp->status_pending) == SIGTRAP
2235 && !breakpoint_inserted_here (lp->stop_pc))
2236 if ((*selector)-- == 0)
2237 return 1;
2238
2239 return 0;
2240 }
2241
2242 static int
2243 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2244 {
2245 struct thread_info *thread = (struct thread_info *) entry;
2246 struct lwp_info *lp = get_thread_lwp (thread);
2247 struct lwp_info *event_lp = data;
2248
2249 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2250 if (lp == event_lp)
2251 return 0;
2252
2253 /* If a LWP other than the LWP that we're reporting an event for has
2254 hit a GDB breakpoint (as opposed to some random trap signal),
2255 then just arrange for it to hit it again later. We don't keep
2256 the SIGTRAP status and don't forward the SIGTRAP signal to the
2257 LWP. We will handle the current event, eventually we will resume
2258 all LWPs, and this one will get its breakpoint trap again.
2259
2260 If we do not do this, then we run the risk that the user will
2261 delete or disable the breakpoint, but the LWP will have already
2262 tripped on it. */
2263
2264 if (thread->last_resume_kind != resume_stop
2265 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2266 && lp->status_pending_p
2267 && WIFSTOPPED (lp->status_pending)
2268 && WSTOPSIG (lp->status_pending) == SIGTRAP
2269 && !lp->stepping
2270 && !lp->stopped_by_watchpoint
2271 && cancel_breakpoint (lp))
2272 /* Throw away the SIGTRAP. */
2273 lp->status_pending_p = 0;
2274
2275 return 0;
2276 }
2277
2278 static void
2279 linux_cancel_breakpoints (void)
2280 {
2281 find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
2282 }
2283
2284 /* Select one LWP out of those that have events pending. */
2285
2286 static void
2287 select_event_lwp (struct lwp_info **orig_lp)
2288 {
2289 int num_events = 0;
2290 int random_selector;
2291 struct thread_info *event_thread;
2292
2293 /* Give preference to any LWP that is being single-stepped. */
2294 event_thread
2295 = (struct thread_info *) find_inferior (&all_threads,
2296 select_singlestep_lwp_callback,
2297 NULL);
2298 if (event_thread != NULL)
2299 {
2300 if (debug_threads)
2301 debug_printf ("SEL: Select single-step %s\n",
2302 target_pid_to_str (ptid_of (event_thread)));
2303 }
2304 else
2305 {
2306 /* No single-stepping LWP. Select one at random, out of those
2307 which have had SIGTRAP events. */
2308
2309 /* First see how many SIGTRAP events we have. */
2310 find_inferior (&all_threads, count_events_callback, &num_events);
2311
2312 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2313 random_selector = (int)
2314 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
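           /* This maps rand () uniformly onto [0, num_events):
              rand () / (RAND_MAX + 1.0) lies in [0, 1), so scaling by
              num_events and truncating yields each index with roughly
              equal probability.  */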
2315
2316 if (debug_threads && num_events > 1)
2317 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2318 num_events, random_selector);
2319
2320 event_thread
2321 = (struct thread_info *) find_inferior (&all_threads,
2322 select_event_lwp_callback,
2323 &random_selector);
2324 }
2325
2326 if (event_thread != NULL)
2327 {
2328 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2329
2330 /* Switch the event LWP. */
2331 *orig_lp = event_lp;
2332 }
2333 }
2334
2335 /* Decrement the suspend count of an LWP. */
2336
2337 static int
2338 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2339 {
2340 struct thread_info *thread = (struct thread_info *) entry;
2341 struct lwp_info *lwp = get_thread_lwp (thread);
2342
2343 /* Ignore EXCEPT. */
2344 if (lwp == except)
2345 return 0;
2346
2347 lwp->suspended--;
2348
2349 gdb_assert (lwp->suspended >= 0);
2350 return 0;
2351 }
2352
2353 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2354    non-NULL.  */
2355
2356 static void
2357 unsuspend_all_lwps (struct lwp_info *except)
2358 {
2359 find_inferior (&all_threads, unsuspend_one_lwp, except);
2360 }
2361
2362 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2363 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2364 void *data);
2365 static int lwp_running (struct inferior_list_entry *entry, void *data);
2366 static ptid_t linux_wait_1 (ptid_t ptid,
2367 struct target_waitstatus *ourstatus,
2368 int target_options);
2369
2370 /* Stabilize threads (move out of jump pads).
2371
2372 If a thread is midway collecting a fast tracepoint, we need to
2373 finish the collection and move it out of the jump pad before
2374 reporting the signal.
2375
2376 This avoids recursion while collecting (when a signal arrives
2377 midway, and the signal handler itself collects), which would trash
2378 the trace buffer. In case the user set a breakpoint in a signal
2379 handler, this avoids the backtrace showing the jump pad, etc..
2380 Most importantly, there are certain things we can't do safely if
2381    threads are stopped in a jump pad (or in their callees).  For
2382 example:
2383
2384    - starting a new trace run.  A thread still collecting the
2385    previous run could trash the trace buffer when resumed.  The trace
2386    buffer control structures would have been reset but the thread had
2387    no way to tell.  The thread could even be midway through memcpy'ing
2388    to the buffer, which would mean that when resumed, it would clobber
2389    the trace buffer that had been set up for the new run.
2390
2391 - we can't rewrite/reuse the jump pads for new tracepoints
2392    safely.  Say you do tstart while a thread is stopped midway
2393    through collecting.  When the thread is later resumed, it finishes the
2394 collection, and returns to the jump pad, to execute the original
2395 instruction that was under the tracepoint jump at the time the
2396 older run had been started. If the jump pad had been rewritten
2397    since then for something else in the new run, the thread would
2398    now execute wrong or random instructions.  */
2399
2400 static void
2401 linux_stabilize_threads (void)
2402 {
2403 struct thread_info *save_inferior;
2404 struct thread_info *thread_stuck;
2405
2406 thread_stuck
2407 = (struct thread_info *) find_inferior (&all_threads,
2408 stuck_in_jump_pad_callback,
2409 NULL);
2410 if (thread_stuck != NULL)
2411 {
2412 if (debug_threads)
2413 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2414 lwpid_of (thread_stuck));
2415 return;
2416 }
2417
2418 save_inferior = current_inferior;
2419
2420 stabilizing_threads = 1;
2421
2422 /* Kick 'em all. */
2423 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2424
2425 /* Loop until all are stopped out of the jump pads. */
2426 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2427 {
2428 struct target_waitstatus ourstatus;
2429 struct lwp_info *lwp;
2430 int wstat;
2431
2432       /* Note that we go through the full wait event loop.  While
2433 moving threads out of jump pad, we need to be able to step
2434 over internal breakpoints and such. */
2435 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2436
2437 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2438 {
2439 lwp = get_thread_lwp (current_inferior);
2440
2441 /* Lock it. */
2442 lwp->suspended++;
2443
2444 if (ourstatus.value.sig != GDB_SIGNAL_0
2445 || current_inferior->last_resume_kind == resume_stop)
2446 {
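     	      /* Re-encode the signal as a raw wait status, as if it
     		 had come from waitpid, so it can flow through the
     		 same deferred-signal queue as kernel-reported
     		 statuses.  */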
2447 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2448 enqueue_one_deferred_signal (lwp, &wstat);
2449 }
2450 }
2451 }
2452
2453 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2454
2455 stabilizing_threads = 0;
2456
2457 current_inferior = save_inferior;
2458
2459 if (debug_threads)
2460 {
2461 thread_stuck
2462 = (struct thread_info *) find_inferior (&all_threads,
2463 stuck_in_jump_pad_callback,
2464 NULL);
2465 if (thread_stuck != NULL)
2466 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2467 lwpid_of (thread_stuck));
2468 }
2469 }
2470
2471 /* Wait for process, returns status. */
2472
2473 static ptid_t
2474 linux_wait_1 (ptid_t ptid,
2475 struct target_waitstatus *ourstatus, int target_options)
2476 {
2477 int w;
2478 struct lwp_info *event_child;
2479 int options;
2480 int pid;
2481 int step_over_finished;
2482 int bp_explains_trap;
2483 int maybe_internal_trap;
2484 int report_to_gdb;
2485 int trace_event;
2486 int in_step_range;
2487
2488 if (debug_threads)
2489 {
2490 debug_enter ();
2491 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2492 }
2493
2494 /* Translate generic target options into linux options. */
2495 options = __WALL;
2496 if (target_options & TARGET_WNOHANG)
2497 options |= WNOHANG;
2498
2499 retry:
2500 bp_explains_trap = 0;
2501 trace_event = 0;
2502 in_step_range = 0;
2503 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2504
2505 /* If we were only supposed to resume one thread, only wait for
2506 that thread - if it's still alive. If it died, however - which
2507 can happen if we're coming from the thread death case below -
2508 then we need to make sure we restart the other threads. We could
2509 pick a thread at random or restart all; restarting all is less
2510 arbitrary. */
2511 if (!non_stop
2512 && !ptid_equal (cont_thread, null_ptid)
2513 && !ptid_equal (cont_thread, minus_one_ptid))
2514 {
2515 struct thread_info *thread;
2516
2517 thread = (struct thread_info *) find_inferior_id (&all_threads,
2518 cont_thread);
2519
2520 /* No stepping, no signal - unless one is pending already, of course. */
2521 if (thread == NULL)
2522 {
2523 struct thread_resume resume_info;
2524 resume_info.thread = minus_one_ptid;
2525 resume_info.kind = resume_continue;
2526 resume_info.sig = 0;
2527 linux_resume (&resume_info, 1);
2528 }
2529 else
2530 ptid = cont_thread;
2531 }
2532
2533 if (ptid_equal (step_over_bkpt, null_ptid))
2534 pid = linux_wait_for_event (ptid, &w, options);
2535 else
2536 {
2537 if (debug_threads)
2538 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2539 target_pid_to_str (step_over_bkpt));
2540 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2541 }
2542
2543 if (pid == 0)
2544 {
2545 gdb_assert (target_options & TARGET_WNOHANG);
2546
2547 if (debug_threads)
2548 {
2549 debug_printf ("linux_wait_1 ret = null_ptid, "
2550 "TARGET_WAITKIND_IGNORE\n");
2551 debug_exit ();
2552 }
2553
2554 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2555 return null_ptid;
2556 }
2557 else if (pid == -1)
2558 {
2559 if (debug_threads)
2560 {
2561 debug_printf ("linux_wait_1 ret = null_ptid, "
2562 "TARGET_WAITKIND_NO_RESUMED\n");
2563 debug_exit ();
2564 }
2565
2566 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2567 return null_ptid;
2568 }
2569
2570 event_child = get_thread_lwp (current_inferior);
2571
2572 /* linux_wait_for_event only returns an exit status for the last
2573 child of a process. Report it. */
2574 if (WIFEXITED (w) || WIFSIGNALED (w))
2575 {
2576 if (WIFEXITED (w))
2577 {
2578 ourstatus->kind = TARGET_WAITKIND_EXITED;
2579 ourstatus->value.integer = WEXITSTATUS (w);
2580
2581 if (debug_threads)
2582 {
2583 debug_printf ("linux_wait_1 ret = %s, exited with "
2584 "retcode %d\n",
2585 target_pid_to_str (ptid_of (current_inferior)),
2586 WEXITSTATUS (w));
2587 debug_exit ();
2588 }
2589 }
2590 else
2591 {
2592 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2593 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2594
2595 if (debug_threads)
2596 {
2597 debug_printf ("linux_wait_1 ret = %s, terminated with "
2598 "signal %d\n",
2599 target_pid_to_str (ptid_of (current_inferior)),
2600 WTERMSIG (w));
2601 debug_exit ();
2602 }
2603 }
2604
2605 return ptid_of (current_inferior);
2606 }
2607
2608 /* If this event was not handled before, and is not a SIGTRAP, we
2609 report it. SIGILL and SIGSEGV are also treated as traps in case
2610 a breakpoint is inserted at the current PC. If this target does
2611 not support internal breakpoints at all, we also report the
2612 SIGTRAP without further processing; it's of no concern to us. */
2613 maybe_internal_trap
2614 = (supports_breakpoints ()
2615 && (WSTOPSIG (w) == SIGTRAP
2616 || ((WSTOPSIG (w) == SIGILL
2617 || WSTOPSIG (w) == SIGSEGV)
2618 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2619
2620 if (maybe_internal_trap)
2621 {
2622 /* Handle anything that requires bookkeeping before deciding to
2623 report the event or continue waiting. */
2624
2625 /* First check if we can explain the SIGTRAP with an internal
2626 breakpoint, or if we should possibly report the event to GDB.
2627 Do this before anything that may remove or insert a
2628 breakpoint. */
2629 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2630
2631 /* We have a SIGTRAP, possibly a step-over dance has just
2632 finished. If so, tweak the state machine accordingly,
2633 reinsert breakpoints and delete any reinsert (software
2634 single-step) breakpoints. */
2635 step_over_finished = finish_step_over (event_child);
2636
2637 /* Now invoke the callbacks of any internal breakpoints there. */
2638 check_breakpoints (event_child->stop_pc);
2639
2640 /* Handle tracepoint data collecting. This may overflow the
2641 trace buffer, and cause a tracing stop, removing
2642 breakpoints. */
2643 trace_event = handle_tracepoints (event_child);
2644
2645 if (bp_explains_trap)
2646 {
2647 /* If we stepped or ran into an internal breakpoint, we've
2648 already handled it. So next time we resume (from this
2649 PC), we should step over it. */
2650 if (debug_threads)
2651 debug_printf ("Hit a gdbserver breakpoint.\n");
2652
2653 if (breakpoint_here (event_child->stop_pc))
2654 event_child->need_step_over = 1;
2655 }
2656 }
2657 else
2658 {
2659 /* We have some other signal, possibly a step-over dance was in
2660 progress, and it should be cancelled too. */
2661 step_over_finished = finish_step_over (event_child);
2662 }
2663
2664 /* We have all the data we need. Either report the event to GDB, or
2665 resume threads and keep waiting for more. */
2666
2667 /* If we're collecting a fast tracepoint, finish the collection and
2668 move out of the jump pad before delivering a signal. See
2669 linux_stabilize_threads. */
2670
2671 if (WIFSTOPPED (w)
2672 && WSTOPSIG (w) != SIGTRAP
2673 && supports_fast_tracepoints ()
2674 && agent_loaded_p ())
2675 {
2676 if (debug_threads)
2677 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2678 "to defer or adjust it.\n",
2679 WSTOPSIG (w), lwpid_of (current_inferior));
2680
2681 /* Allow debugging the jump pad itself. */
2682 if (current_inferior->last_resume_kind != resume_step
2683 && maybe_move_out_of_jump_pad (event_child, &w))
2684 {
2685 enqueue_one_deferred_signal (event_child, &w);
2686
2687 if (debug_threads)
2688 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2689 WSTOPSIG (w), lwpid_of (current_inferior));
2690
2691 linux_resume_one_lwp (event_child, 0, 0, NULL);
2692 goto retry;
2693 }
2694 }
2695
2696 if (event_child->collecting_fast_tracepoint)
2697 {
2698 if (debug_threads)
2699 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2700 "Check if we're already there.\n",
2701 lwpid_of (current_inferior),
2702 event_child->collecting_fast_tracepoint);
2703
2704 trace_event = 1;
2705
2706 event_child->collecting_fast_tracepoint
2707 = linux_fast_tracepoint_collecting (event_child, NULL);
2708
2709 if (event_child->collecting_fast_tracepoint != 1)
2710 {
2711 /* No longer need this breakpoint. */
2712 if (event_child->exit_jump_pad_bkpt != NULL)
2713 {
2714 if (debug_threads)
2715 		  debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2716 				"stopping all threads momentarily.\n");
2717
2718 /* Other running threads could hit this breakpoint.
2719 We don't handle moribund locations like GDB does,
2720 instead we always pause all threads when removing
2721 breakpoints, so that any step-over or
2722 decr_pc_after_break adjustment is always taken
2723 care of while the breakpoint is still
2724 inserted. */
2725 stop_all_lwps (1, event_child);
2726 cancel_breakpoints ();
2727
2728 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2729 event_child->exit_jump_pad_bkpt = NULL;
2730
2731 unstop_all_lwps (1, event_child);
2732
2733 gdb_assert (event_child->suspended >= 0);
2734 }
2735 }
2736
2737 if (event_child->collecting_fast_tracepoint == 0)
2738 {
2739 if (debug_threads)
2740 debug_printf ("fast tracepoint finished "
2741 "collecting successfully.\n");
2742
2743 /* We may have a deferred signal to report. */
2744 if (dequeue_one_deferred_signal (event_child, &w))
2745 {
2746 if (debug_threads)
2747 debug_printf ("dequeued one signal.\n");
2748 }
2749 else
2750 {
2751 if (debug_threads)
2752 debug_printf ("no deferred signals.\n");
2753
2754 if (stabilizing_threads)
2755 {
2756 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2757 ourstatus->value.sig = GDB_SIGNAL_0;
2758
2759 if (debug_threads)
2760 {
2761 debug_printf ("linux_wait_1 ret = %s, stopped "
2762 "while stabilizing threads\n",
2763 target_pid_to_str (ptid_of (current_inferior)));
2764 debug_exit ();
2765 }
2766
2767 return ptid_of (current_inferior);
2768 }
2769 }
2770 }
2771 }
2772
2773 /* Check whether GDB would be interested in this event. */
2774
2775 /* If GDB is not interested in this signal, don't stop other
2776 threads, and don't report it to GDB. Just resume the inferior
2777 right away. We do this for threading-related signals as well as
2778 any that GDB specifically requested we ignore. But never ignore
2779 SIGSTOP if we sent it ourselves, and do not ignore signals when
2780 stepping - they may require special handling to skip the signal
2781 handler. */
2782 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2783 thread library? */
2784 if (WIFSTOPPED (w)
2785 && current_inferior->last_resume_kind != resume_step
2786 && (
2787 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2788 (current_process ()->private->thread_db != NULL
2789 && (WSTOPSIG (w) == __SIGRTMIN
2790 || WSTOPSIG (w) == __SIGRTMIN + 1))
2791 ||
2792 #endif
2793 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2794 && !(WSTOPSIG (w) == SIGSTOP
2795 && current_inferior->last_resume_kind == resume_stop))))
2796 {
2797 siginfo_t info, *info_p;
2798
2799 if (debug_threads)
2800 debug_printf ("Ignored signal %d for LWP %ld.\n",
2801 WSTOPSIG (w), lwpid_of (current_inferior));
2802
2803 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
2804 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2805 info_p = &info;
2806 else
2807 info_p = NULL;
2808 linux_resume_one_lwp (event_child, event_child->stepping,
2809 WSTOPSIG (w), info_p);
2810 goto retry;
2811 }
2812
2813 /* Note that all addresses are always "out of the step range" when
2814 there's no range to begin with. */
2815 in_step_range = lwp_in_step_range (event_child);
2816
2817 /* If GDB wanted this thread to single step, and the thread is out
2818 of the step range, we always want to report the SIGTRAP, and let
2819 GDB handle it. Watchpoints should always be reported. So should
2820 signals we can't explain. A SIGTRAP we can't explain could be a
2821    GDB breakpoint --- we may or may not support Z0 breakpoints.  If we
2822    do, we'll be able to handle GDB breakpoints on top of internal
2823    breakpoints, by handling the internal breakpoint and still
2824    reporting the event to GDB.  If we don't, we're out of luck; GDB
2825    won't see the breakpoint hit.  */
2826 report_to_gdb = (!maybe_internal_trap
2827 || (current_inferior->last_resume_kind == resume_step
2828 && !in_step_range)
2829 || event_child->stopped_by_watchpoint
2830 || (!step_over_finished && !in_step_range
2831 && !bp_explains_trap && !trace_event)
2832 || (gdb_breakpoint_here (event_child->stop_pc)
2833 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2834 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2835
2836 run_breakpoint_commands (event_child->stop_pc);
2837
2838 /* We found no reason GDB would want us to stop. We either hit one
2839 of our own breakpoints, or finished an internal step GDB
2840 shouldn't know about. */
2841 if (!report_to_gdb)
2842 {
2843 if (debug_threads)
2844 {
2845 if (bp_explains_trap)
2846 debug_printf ("Hit a gdbserver breakpoint.\n");
2847 if (step_over_finished)
2848 debug_printf ("Step-over finished.\n");
2849 if (trace_event)
2850 debug_printf ("Tracepoint event.\n");
2851 if (lwp_in_step_range (event_child))
2852 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2853 paddress (event_child->stop_pc),
2854 paddress (event_child->step_range_start),
2855 paddress (event_child->step_range_end));
2856 }
2857
2858 /* We're not reporting this breakpoint to GDB, so apply the
2859 decr_pc_after_break adjustment to the inferior's regcache
2860 ourselves. */
2861
2862 if (the_low_target.set_pc != NULL)
2863 {
2864 struct regcache *regcache
2865 = get_thread_regcache (current_inferior, 1);
2866 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2867 }
2868
2869 /* We may have finished stepping over a breakpoint. If so,
2870 we've stopped and suspended all LWPs momentarily except the
2871 stepping one. This is where we resume them all again. We're
2872 going to keep waiting, so use proceed, which handles stepping
2873 over the next breakpoint. */
2874 if (debug_threads)
2875 debug_printf ("proceeding all threads.\n");
2876
2877 if (step_over_finished)
2878 unsuspend_all_lwps (event_child);
2879
2880 proceed_all_lwps ();
2881 goto retry;
2882 }
2883
2884 if (debug_threads)
2885 {
2886 if (current_inferior->last_resume_kind == resume_step)
2887 {
2888 if (event_child->step_range_start == event_child->step_range_end)
2889 debug_printf ("GDB wanted to single-step, reporting event.\n");
2890 else if (!lwp_in_step_range (event_child))
2891 debug_printf ("Out of step range, reporting event.\n");
2892 }
2893 if (event_child->stopped_by_watchpoint)
2894 debug_printf ("Stopped by watchpoint.\n");
2895 if (gdb_breakpoint_here (event_child->stop_pc))
2896 debug_printf ("Stopped by GDB breakpoint.\n");
2897       debug_printf ("Hit a non-gdbserver trap event.\n");
2899 }
2900
2901 /* Alright, we're going to report a stop. */
2902
2903 if (!non_stop && !stabilizing_threads)
2904 {
2905 /* In all-stop, stop all threads. */
2906 stop_all_lwps (0, NULL);
2907
2908 /* If we're not waiting for a specific LWP, choose an event LWP
2909 from among those that have had events. Giving equal priority
2910 to all LWPs that have had events helps prevent
2911 starvation. */
2912 if (ptid_equal (ptid, minus_one_ptid))
2913 {
2914 event_child->status_pending_p = 1;
2915 event_child->status_pending = w;
2916
2917 select_event_lwp (&event_child);
2918
2919 /* current_inferior and event_child must stay in sync. */
2920 current_inferior = get_lwp_thread (event_child);
2921
2922 event_child->status_pending_p = 0;
2923 w = event_child->status_pending;
2924 }
2925
2926 /* Now that we've selected our final event LWP, cancel any
2927 breakpoints in other LWPs that have hit a GDB breakpoint.
2928 See the comment in cancel_breakpoints_callback to find out
2929 why. */
2930 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
2931
2932       /* If we were doing a step-over, all other threads but the stepping one
2933 had been paused in start_step_over, with their suspend counts
2934 incremented. We don't want to do a full unstop/unpause, because we're
2935 in all-stop mode (so we want threads stopped), but we still need to
2936 unsuspend the other threads, to decrement their `suspended' count
2937 back. */
2938 if (step_over_finished)
2939 unsuspend_all_lwps (event_child);
2940
2941 /* Stabilize threads (move out of jump pads). */
2942 stabilize_threads ();
2943 }
2944 else
2945 {
2946 /* If we just finished a step-over, then all threads had been
2947 momentarily paused. In all-stop, that's fine, we want
2948 threads stopped by now anyway. In non-stop, we need to
2949 re-resume threads that GDB wanted to be running. */
2950 if (step_over_finished)
2951 unstop_all_lwps (1, event_child);
2952 }
2953
2954 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2955
2956 if (current_inferior->last_resume_kind == resume_stop
2957 && WSTOPSIG (w) == SIGSTOP)
2958 {
2959       /* This thread was requested to stop by GDB with vCont;t,
2960 	 and it stopped cleanly, so report it as SIG0.  The use of
2961 SIGSTOP is an implementation detail. */
2962 ourstatus->value.sig = GDB_SIGNAL_0;
2963 }
2964 else if (current_inferior->last_resume_kind == resume_stop
2965 && WSTOPSIG (w) != SIGSTOP)
2966 {
2967       /* This thread was requested to stop by GDB with vCont;t,
2968 	 but it stopped for other reasons.  */
2969 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2970 }
2971 else
2972 {
2973 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2974 }
2975
2976 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2977
2978 if (debug_threads)
2979 {
2980 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2981 target_pid_to_str (ptid_of (current_inferior)),
2982 ourstatus->kind, ourstatus->value.sig);
2983 debug_exit ();
2984 }
2985
2986 return ptid_of (current_inferior);
2987 }
2988
2989 /* Get rid of any pending event in the pipe. */
2990 static void
2991 async_file_flush (void)
2992 {
2993 int ret;
2994 char buf;
2995
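       /* Drain the event pipe completely.  The read end is expected
          to be non-blocking (set up when async mode is enabled), so
          the loop terminates when read fails with EAGAIN.  */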
2996 do
2997 ret = read (linux_event_pipe[0], &buf, 1);
2998 while (ret >= 0 || (ret == -1 && errno == EINTR));
2999 }
3000
3001 /* Put something in the pipe, so the event loop wakes up. */
3002 static void
3003 async_file_mark (void)
3004 {
3005 int ret;
3006
3007 async_file_flush ();
3008
3009 do
3010 ret = write (linux_event_pipe[1], "+", 1);
3011 while (ret == 0 || (ret == -1 && errno == EINTR));
3012
3013 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3014 be awakened anyway. */
3015 }
3016
3017 static ptid_t
3018 linux_wait (ptid_t ptid,
3019 struct target_waitstatus *ourstatus, int target_options)
3020 {
3021 ptid_t event_ptid;
3022
3023 /* Flush the async file first. */
3024 if (target_is_async_p ())
3025 async_file_flush ();
3026
3027 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3028
3029 /* If at least one stop was reported, there may be more. A single
3030 SIGCHLD can signal more than one child stop. */
3031 if (target_is_async_p ()
3032 && (target_options & TARGET_WNOHANG) != 0
3033 && !ptid_equal (event_ptid, null_ptid))
3034 async_file_mark ();
3035
3036 return event_ptid;
3037 }
3038
3039 /* Send a signal to an LWP. */
3040
3041 static int
3042 kill_lwp (unsigned long lwpid, int signo)
3043 {
3044 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3045 fails, then we are not using nptl threads and we should be using kill. */
3046
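       /* Under NPTL all threads share the process id, so plain
          kill () cannot target one specific thread; tkill takes the
          kernel task (LWP) id instead.  */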
3047 #ifdef __NR_tkill
3048 {
3049 static int tkill_failed;
3050
3051 if (!tkill_failed)
3052 {
3053 int ret;
3054
3055 errno = 0;
3056 ret = syscall (__NR_tkill, lwpid, signo);
3057 if (errno != ENOSYS)
3058 return ret;
3059 tkill_failed = 1;
3060 }
3061 }
3062 #endif
3063
3064 return kill (lwpid, signo);
3065 }
3066
3067 void
3068 linux_stop_lwp (struct lwp_info *lwp)
3069 {
3070 send_sigstop (lwp);
3071 }
3072
3073 static void
3074 send_sigstop (struct lwp_info *lwp)
3075 {
3076 int pid;
3077
3078 pid = lwpid_of (get_lwp_thread (lwp));
3079
3080   /* If we already have a pending stop signal for this LWP, don't
3081      send another.  */
3082 if (lwp->stop_expected)
3083 {
3084 if (debug_threads)
3085 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3086
3087 return;
3088 }
3089
3090 if (debug_threads)
3091 debug_printf ("Sending sigstop to lwp %d\n", pid);
3092
3093 lwp->stop_expected = 1;
3094 kill_lwp (pid, SIGSTOP);
3095 }
3096
3097 static int
3098 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3099 {
3100 struct thread_info *thread = (struct thread_info *) entry;
3101 struct lwp_info *lwp = get_thread_lwp (thread);
3102
3103 /* Ignore EXCEPT. */
3104 if (lwp == except)
3105 return 0;
3106
3107 if (lwp->stopped)
3108 return 0;
3109
3110 send_sigstop (lwp);
3111 return 0;
3112 }
3113
3114 /* Increment the suspend count of an LWP, and stop it, if not stopped
3115 yet. */
3116 static int
3117 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3118 void *except)
3119 {
3120 struct thread_info *thread = (struct thread_info *) entry;
3121 struct lwp_info *lwp = get_thread_lwp (thread);
3122
3123 /* Ignore EXCEPT. */
3124 if (lwp == except)
3125 return 0;
3126
3127 lwp->suspended++;
3128
3129 return send_sigstop_callback (entry, except);
3130 }
3131
3132 static void
3133 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3134 {
3135 /* It's dead, really. */
3136 lwp->dead = 1;
3137
3138 /* Store the exit status for later. */
3139 lwp->status_pending_p = 1;
3140 lwp->status_pending = wstat;
3141
3142 /* Prevent trying to stop it. */
3143 lwp->stopped = 1;
3144
3145 /* No further stops are expected from a dead lwp. */
3146 lwp->stop_expected = 0;
3147 }
3148
3149 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3150
3151 static void
3152 wait_for_sigstop (void)
3153 {
3154 struct thread_info *saved_inferior;
3155 ptid_t saved_tid;
3156 int wstat;
3157 int ret;
3158
3159 saved_inferior = current_inferior;
3160 if (saved_inferior != NULL)
3161 saved_tid = saved_inferior->entry.id;
3162 else
3163 saved_tid = null_ptid; /* avoid bogus unused warning */
3164
3165 if (debug_threads)
3166 debug_printf ("wait_for_sigstop: pulling events\n");
3167
3168 /* Passing NULL_PTID as filter indicates we want all events to be
3169 left pending. Eventually this returns when there are no
3170 unwaited-for children left. */
3171 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3172 &wstat, __WALL);
3173 gdb_assert (ret == -1);
3174
3175 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3176 current_inferior = saved_inferior;
3177 else
3178 {
3179 if (debug_threads)
3180 debug_printf ("Previously current thread died.\n");
3181
3182 if (non_stop)
3183 {
3184 /* We can't change the current inferior behind GDB's back,
3185 otherwise, a subsequent command may apply to the wrong
3186 process. */
3187 current_inferior = NULL;
3188 }
3189 else
3190 {
3191 /* Set a valid thread as current. */
3192 set_desired_inferior (0);
3193 }
3194 }
3195 }
3196
3197 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3198 move it out, because we need to report the stop event to GDB. For
3199 example, if the user puts a breakpoint in the jump pad, it's
3200 because she wants to debug it. */
3201
3202 static int
3203 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3204 {
3205 struct thread_info *thread = (struct thread_info *) entry;
3206 struct lwp_info *lwp = get_thread_lwp (thread);
3207
3208 gdb_assert (lwp->suspended == 0);
3209 gdb_assert (lwp->stopped);
3210
3211   /* Allow debugging the jump pad, gdb_collect, etc.  */
3212 return (supports_fast_tracepoints ()
3213 && agent_loaded_p ()
3214 && (gdb_breakpoint_here (lwp->stop_pc)
3215 || lwp->stopped_by_watchpoint
3216 || thread->last_resume_kind == resume_step)
3217 && linux_fast_tracepoint_collecting (lwp, NULL));
3218 }
3219
3220 static void
3221 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3222 {
3223 struct thread_info *thread = (struct thread_info *) entry;
3224 struct lwp_info *lwp = get_thread_lwp (thread);
3225 int *wstat;
3226
3227 gdb_assert (lwp->suspended == 0);
3228 gdb_assert (lwp->stopped);
3229
3230 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3231
3232 /* Allow debugging the jump pad, gdb_collect, etc. */
3233 if (!gdb_breakpoint_here (lwp->stop_pc)
3234 && !lwp->stopped_by_watchpoint
3235 && thread->last_resume_kind != resume_step
3236 && maybe_move_out_of_jump_pad (lwp, wstat))
3237 {
3238 if (debug_threads)
3239 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3240 lwpid_of (thread));
3241
3242 if (wstat)
3243 {
3244 lwp->status_pending_p = 0;
3245 enqueue_one_deferred_signal (lwp, wstat);
3246
3247 if (debug_threads)
3248 debug_printf ("Signal %d for LWP %ld deferred "
3249 "(in jump pad)\n",
3250 WSTOPSIG (*wstat), lwpid_of (thread));
3251 }
3252
3253 linux_resume_one_lwp (lwp, 0, 0, NULL);
3254 }
3255 else
3256 lwp->suspended++;
3257 }
3258
3259 static int
3260 lwp_running (struct inferior_list_entry *entry, void *data)
3261 {
3262 struct thread_info *thread = (struct thread_info *) entry;
3263 struct lwp_info *lwp = get_thread_lwp (thread);
3264
3265 if (lwp->dead)
3266 return 0;
3267 if (lwp->stopped)
3268 return 0;
3269 return 1;
3270 }
3271
3272 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3273 If SUSPEND, then also increase the suspend count of every LWP,
3274 except EXCEPT. */
3275
3276 static void
3277 stop_all_lwps (int suspend, struct lwp_info *except)
3278 {
3279 /* Should not be called recursively. */
3280 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3281
3282 if (debug_threads)
3283 {
3284 debug_enter ();
3285 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3286 suspend ? "stop-and-suspend" : "stop",
3287 except != NULL
3288 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3289 : "none");
3290 }
3291
3292 stopping_threads = (suspend
3293 ? STOPPING_AND_SUSPENDING_THREADS
3294 : STOPPING_THREADS);
3295
3296 if (suspend)
3297 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3298 else
3299 find_inferior (&all_threads, send_sigstop_callback, except);
3300 wait_for_sigstop ();
3301 stopping_threads = NOT_STOPPING_THREADS;
3302
3303 if (debug_threads)
3304 {
3305 debug_printf ("stop_all_lwps done, setting stopping_threads "
3306 "back to !stopping\n");
3307 debug_exit ();
3308 }
3309 }
3310
3311 /* Resume execution of LWP.
3312 If STEP is nonzero, single-step it.
3313 If SIGNAL is nonzero, give it that signal. */
3314
3315 static void
3316 linux_resume_one_lwp (struct lwp_info *lwp,
3317 int step, int signal, siginfo_t *info)
3318 {
3319 struct thread_info *thread = get_lwp_thread (lwp);
3320 struct thread_info *saved_inferior;
3321 int fast_tp_collecting;
3322
3323 if (lwp->stopped == 0)
3324 return;
3325
3326 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3327
3328 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3329
3330 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3331 user used the "jump" command, or "set $pc = foo"). */
3332 if (lwp->stop_pc != get_pc (lwp))
3333 {
3334 /* Collecting 'while-stepping' actions doesn't make sense
3335 anymore. */
3336 release_while_stepping_state_list (thread);
3337 }
3338
3339 /* If we have pending signals or status, and a new signal, enqueue the
3340 signal. Also enqueue the signal if we are waiting to reinsert a
3341 breakpoint; it will be picked up again below. */
3342 if (signal != 0
3343 && (lwp->status_pending_p
3344 || lwp->pending_signals != NULL
3345 || lwp->bp_reinsert != 0
3346 || fast_tp_collecting))
3347 {
3348 struct pending_signals *p_sig;
3349 p_sig = xmalloc (sizeof (*p_sig));
3350 p_sig->prev = lwp->pending_signals;
3351 p_sig->signal = signal;
3352 if (info == NULL)
3353 memset (&p_sig->info, 0, sizeof (siginfo_t));
3354 else
3355 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3356 lwp->pending_signals = p_sig;
3357 }
3358
3359 if (lwp->status_pending_p)
3360 {
3361 if (debug_threads)
3362 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3363 " has pending status\n",
3364 lwpid_of (thread), step ? "step" : "continue", signal,
3365 lwp->stop_expected ? "expected" : "not expected");
3366 return;
3367 }
3368
3369 saved_inferior = current_inferior;
3370 current_inferior = thread;
3371
3372 if (debug_threads)
3373 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3374 lwpid_of (thread), step ? "step" : "continue", signal,
3375 lwp->stop_expected ? "expected" : "not expected");
3376
3377 /* This bit needs some thinking about. If we get a signal that
3378 we must report while a single-step reinsert is still pending,
3379 we often end up resuming the thread. It might be better to
3380 (ew) allow a stack of pending events; then we could be sure that
3381 the reinsert happened right away and not lose any signals.
3382
3383 Making this stack would also shrink the window in which breakpoints are
3384 uninserted (see comment in linux_wait_for_lwp) but not enough for
3385 complete correctness, so it won't solve that problem. It may be
3386 worthwhile just to solve this one, however. */
3387 if (lwp->bp_reinsert != 0)
3388 {
3389 if (debug_threads)
3390 debug_printf (" pending reinsert at 0x%s\n",
3391 paddress (lwp->bp_reinsert));
3392
3393 if (can_hardware_single_step ())
3394 {
3395 if (fast_tp_collecting == 0)
3396 {
3397 if (step == 0)
3398 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3399 if (lwp->suspended)
3400 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3401 lwp->suspended);
3402 }
3403
3404 step = 1;
3405 }
3406
3407 /* Postpone any pending signal. It was enqueued above. */
3408 signal = 0;
3409 }
3410
3411 if (fast_tp_collecting == 1)
3412 {
3413 if (debug_threads)
3414 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3415 " (exit-jump-pad-bkpt)\n",
3416 lwpid_of (thread));
3417
3418 /* Postpone any pending signal. It was enqueued above. */
3419 signal = 0;
3420 }
3421 else if (fast_tp_collecting == 2)
3422 {
3423 if (debug_threads)
3424 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3425 " single-stepping\n",
3426 lwpid_of (thread));
3427
3428 if (can_hardware_single_step ())
3429 step = 1;
3430 else
3431 fatal ("moving out of jump pad single-stepping"
3432 " not implemented on this target");
3433
3434 /* Postpone any pending signal. It was enqueued above. */
3435 signal = 0;
3436 }
3437
3438 /* If we have while-stepping actions in this thread set it stepping.
3439 If we have a signal to deliver, it may or may not be set to
3440    SIG_IGN; we don't know.  Assume the handler will run, and allow
3441    collecting while-stepping into it.  A possible smart thing to
3442 do would be to set an internal breakpoint at the signal return
3443 address, continue, and carry on catching this while-stepping
3444 action only when that breakpoint is hit. A future
3445 enhancement. */
3446 if (thread->while_stepping != NULL
3447 && can_hardware_single_step ())
3448 {
3449 if (debug_threads)
3450 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3451 lwpid_of (thread));
3452 step = 1;
3453 }
3454
3455 if (debug_threads && the_low_target.get_pc != NULL)
3456 {
3457 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3458 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3459 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3460 }
3461
3462 /* If we have pending signals, consume one unless we are trying to
3463 reinsert a breakpoint or we're trying to finish a fast tracepoint
3464 collect. */
3465 if (lwp->pending_signals != NULL
3466 && lwp->bp_reinsert == 0
3467 && fast_tp_collecting == 0)
3468 {
3469 struct pending_signals **p_sig;
3470
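           /* Signals are pushed at the head of the list, so walk to
              the tail and deliver the oldest pending signal first
              (FIFO).  */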
3471 p_sig = &lwp->pending_signals;
3472 while ((*p_sig)->prev != NULL)
3473 p_sig = &(*p_sig)->prev;
3474
3475 signal = (*p_sig)->signal;
3476 if ((*p_sig)->info.si_signo != 0)
3477 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3478 &(*p_sig)->info);
3479
3480 free (*p_sig);
3481 *p_sig = NULL;
3482 }
3483
3484 if (the_low_target.prepare_to_resume != NULL)
3485 the_low_target.prepare_to_resume (lwp);
3486
3487 regcache_invalidate_thread (thread);
3488 errno = 0;
3489 lwp->stopped = 0;
3490 lwp->stopped_by_watchpoint = 0;
3491 lwp->stepping = step;
3492 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3493 (PTRACE_TYPE_ARG3) 0,
3494 /* Coerce to a uintptr_t first to avoid potential gcc warning
3495 of coercing an 8 byte integer to a 4 byte pointer. */
3496 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3497
3498 current_inferior = saved_inferior;
3499 if (errno)
3500 {
3501 /* ESRCH from ptrace either means that the thread was already
3502 running (an error) or that it is gone (a race condition). If
3503 it's gone, we will get a notification the next time we wait,
3504 so we can ignore the error. We could differentiate these
3505 two, but it's tricky without waiting; the thread still exists
3506 as a zombie, so sending it signal 0 would succeed. So just
3507 ignore ESRCH. */
3508 if (errno == ESRCH)
3509 return;
3510
3511 perror_with_name ("ptrace");
3512 }
3513 }
3514
3515 struct thread_resume_array
3516 {
3517 struct thread_resume *resume;
3518 size_t n;
3519 };
3520
3521 /* This function is called once per thread via find_inferior.
3522 ARG is a pointer to a thread_resume_array struct.
3523 We look up the thread specified by ENTRY in ARG, and mark the thread
3524 with a pointer to the appropriate resume request.
3525
3526    This algorithm is O(threads * resume elements), but the number of
3527    resume elements is small (and will remain small at least until GDB
3528    supports thread suspension).  */
3529
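     /* For example, a GDB "vCont;s:p5.5;c" packet would arrive here as
        two thread_resume entries: a resume_step whose thread is
        (pid 5, lwp 5), and a resume_continue whose thread is
        minus_one_ptid.  Each thread takes the first entry that matches
        it, so more specific actions must precede the wildcard (a
        made-up packet, shown only to illustrate the matching loop).  */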
3530 static int
3531 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3532 {
3533 struct thread_info *thread = (struct thread_info *) entry;
3534 struct lwp_info *lwp = get_thread_lwp (thread);
3535 int ndx;
3536 struct thread_resume_array *r;
3537
3538 r = arg;
3539
3540 for (ndx = 0; ndx < r->n; ndx++)
3541 {
3542 ptid_t ptid = r->resume[ndx].thread;
3543 if (ptid_equal (ptid, minus_one_ptid)
3544 || ptid_equal (ptid, entry->id)
3545 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3546 of PID'. */
3547 || (ptid_get_pid (ptid) == pid_of (thread)
3548 && (ptid_is_pid (ptid)
3549 || ptid_get_lwp (ptid) == -1)))
3550 {
3551 if (r->resume[ndx].kind == resume_stop
3552 && thread->last_resume_kind == resume_stop)
3553 {
3554 if (debug_threads)
3555 debug_printf ("already %s LWP %ld at GDB's request\n",
3556 (thread->last_status.kind
3557 == TARGET_WAITKIND_STOPPED)
3558 ? "stopped"
3559 : "stopping",
3560 lwpid_of (thread));
3561
3562 continue;
3563 }
3564
3565 lwp->resume = &r->resume[ndx];
3566 thread->last_resume_kind = lwp->resume->kind;
3567
3568 lwp->step_range_start = lwp->resume->step_range_start;
3569 lwp->step_range_end = lwp->resume->step_range_end;
3570
3571 /* If we had a deferred signal to report, dequeue one now.
3572 This can happen if LWP gets more than one signal while
3573 trying to get out of a jump pad. */
3574 if (lwp->stopped
3575 && !lwp->status_pending_p
3576 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3577 {
3578 lwp->status_pending_p = 1;
3579
3580 if (debug_threads)
3581 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3582 "leaving status pending.\n",
3583 WSTOPSIG (lwp->status_pending),
3584 lwpid_of (thread));
3585 }
3586
3587 return 0;
3588 }
3589 }
3590
3591 /* No resume action for this thread. */
3592 lwp->resume = NULL;
3593
3594 return 0;
3595 }
3596
3597 /* find_inferior callback for linux_resume.
3598 Set *FLAG_P if this lwp has an interesting status pending. */
3599
3600 static int
3601 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3602 {
3603 struct thread_info *thread = (struct thread_info *) entry;
3604 struct lwp_info *lwp = get_thread_lwp (thread);
3605
3606 /* LWPs which will not be resumed are not interesting, because
3607 we might not wait for them next time through linux_wait. */
3608 if (lwp->resume == NULL)
3609 return 0;
3610
3611 if (lwp->status_pending_p)
3612 * (int *) flag_p = 1;
3613
3614 return 0;
3615 }
3616
3617 /* Return 1 if this lwp that GDB wants running is stopped at an
3618 internal breakpoint that we need to step over. It assumes that any
3619 required STOP_PC adjustment has already been propagated to the
3620 inferior's regcache. */
3621
3622 static int
3623 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3624 {
3625 struct thread_info *thread = (struct thread_info *) entry;
3626 struct lwp_info *lwp = get_thread_lwp (thread);
3627 struct thread_info *saved_inferior;
3628 CORE_ADDR pc;
3629
3630 /* LWPs which will not be resumed are not interesting, because we
3631 might not wait for them next time through linux_wait. */
3632
3633 if (!lwp->stopped)
3634 {
3635 if (debug_threads)
3636 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3637 lwpid_of (thread));
3638 return 0;
3639 }
3640
3641 if (thread->last_resume_kind == resume_stop)
3642 {
3643 if (debug_threads)
3644 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3645 " stopped\n",
3646 lwpid_of (thread));
3647 return 0;
3648 }
3649
3650 gdb_assert (lwp->suspended >= 0);
3651
3652 if (lwp->suspended)
3653 {
3654 if (debug_threads)
3655 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3656 lwpid_of (thread));
3657 return 0;
3658 }
3659
3660 if (!lwp->need_step_over)
3661 {
3662 if (debug_threads)
3663 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3664 }
3665
3666 if (lwp->status_pending_p)
3667 {
3668 if (debug_threads)
3669 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3670 " status.\n",
3671 lwpid_of (thread));
3672 return 0;
3673 }
3674
3675 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3676 or we have. */
3677 pc = get_pc (lwp);
3678
3679 /* If the PC has changed since we stopped, then don't do anything,
3680 and let the breakpoint/tracepoint be hit. This happens if, for
3681 instance, GDB handled the decr_pc_after_break subtraction itself,
3682 GDB is OOL stepping this thread, or the user has issued a "jump"
3683    command, or poked the thread's registers herself.  */
3684 if (pc != lwp->stop_pc)
3685 {
3686 if (debug_threads)
3687 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3688 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3689 lwpid_of (thread),
3690 paddress (lwp->stop_pc), paddress (pc));
3691
3692 lwp->need_step_over = 0;
3693 return 0;
3694 }
3695
3696 saved_inferior = current_inferior;
3697 current_inferior = thread;
3698
3699 /* We can only step over breakpoints we know about. */
3700 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3701 {
3702 /* Don't step over a breakpoint that GDB expects to hit
3703 though. If the condition is being evaluated on the target's side
3704 	 and it evaluates to false, step over this breakpoint as well.  */
3705 if (gdb_breakpoint_here (pc)
3706 && gdb_condition_true_at_breakpoint (pc)
3707 && gdb_no_commands_at_breakpoint (pc))
3708 {
3709 if (debug_threads)
3710 debug_printf ("Need step over [LWP %ld]? yes, but found"
3711 " GDB breakpoint at 0x%s; skipping step over\n",
3712 lwpid_of (thread), paddress (pc));
3713
3714 current_inferior = saved_inferior;
3715 return 0;
3716 }
3717 else
3718 {
3719 if (debug_threads)
3720 debug_printf ("Need step over [LWP %ld]? yes, "
3721 "found breakpoint at 0x%s\n",
3722 lwpid_of (thread), paddress (pc));
3723
3724 /* We've found an lwp that needs stepping over --- return 1 so
3725 that find_inferior stops looking. */
3726 current_inferior = saved_inferior;
3727
3728 /* If the step over is cancelled, this is set again. */
3729 lwp->need_step_over = 0;
3730 return 1;
3731 }
3732 }
3733
3734 current_inferior = saved_inferior;
3735
3736 if (debug_threads)
3737 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3738 " at 0x%s\n",
3739 lwpid_of (thread), paddress (pc));
3740
3741 return 0;
3742 }
3743
3744 /* Start a step-over operation on LWP.  When LWP stops at a
3745 breakpoint, we need to move the breakpoint out of the way to make
3746 progress.  If we let other threads run while we do that, they may
3747 pass by the breakpoint location and miss hitting it.  To avoid
3748 that, a step-over momentarily stops all threads, then single-steps
3749 LWP with the breakpoint temporarily uninserted from the
3750 inferior.  When the single-step finishes, we reinsert the
3751 breakpoint and let all threads that are supposed to be running
3752 run again.
3753
3754 On targets that don't support hardware single-step, we don't
3755 currently support full software single-stepping. Instead, we only
3756 support stepping over the thread event breakpoint, by asking the
3757 low target where to place a reinsert breakpoint.  Because the
3758 breakpoint being stepped over is assumed to be a thread event
3759 breakpoint, the return address of the current function is usually
3760 a good enough place to set the reinsert breakpoint. */
3761
3762 static int
3763 start_step_over (struct lwp_info *lwp)
3764 {
3765 struct thread_info *thread = get_lwp_thread (lwp);
3766 struct thread_info *saved_inferior;
3767 CORE_ADDR pc;
3768 int step;
3769
3770 if (debug_threads)
3771 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3772 lwpid_of (thread));
3773
3774 stop_all_lwps (1, lwp);
3775 gdb_assert (lwp->suspended == 0);
3776
3777 if (debug_threads)
3778 debug_printf ("Done stopping all threads for step-over.\n");
3779
3780 /* Note, we should always reach here with an already adjusted PC,
3781 either by GDB (if we're resuming due to GDB's request), or by our
3782 caller, if we just finished handling an internal breakpoint GDB
3783 shouldn't care about. */
3784 pc = get_pc (lwp);
3785
3786 saved_inferior = current_inferior;
3787 current_inferior = thread;
3788
3789 lwp->bp_reinsert = pc;
3790 uninsert_breakpoints_at (pc);
3791 uninsert_fast_tracepoint_jumps_at (pc);
3792
3793 if (can_hardware_single_step ())
3794 {
3795 step = 1;
3796 }
3797 else
3798 {
3799 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3800 set_reinsert_breakpoint (raddr);
3801 step = 0;
3802 }
3803
3804 current_inferior = saved_inferior;
3805
3806 linux_resume_one_lwp (lwp, step, 0, NULL);
3807
3808 /* Require next event from this LWP. */
3809 step_over_bkpt = thread->entry.id;
3810 return 1;
3811 }
3812
3813 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3814 start_step_over, if still there, and delete any reinsert
3815 breakpoints we've set, on non hardware single-step targets. */
3816
3817 static int
3818 finish_step_over (struct lwp_info *lwp)
3819 {
3820 if (lwp->bp_reinsert != 0)
3821 {
3822 if (debug_threads)
3823 debug_printf ("Finished step over.\n");
3824
3825 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3826 may be no breakpoint to reinsert there by now. */
3827 reinsert_breakpoints_at (lwp->bp_reinsert);
3828 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3829
3830 lwp->bp_reinsert = 0;
3831
3832 /* Delete any software-single-step reinsert breakpoints. No
3833 longer needed. We don't have to worry about other threads
3834 hitting this trap, and later not being able to explain it,
3835 because we were stepping over a breakpoint, and we hold all
3836 threads but LWP stopped while doing that. */
3837 if (!can_hardware_single_step ())
3838 delete_reinsert_breakpoints ();
3839
3840 step_over_bkpt = null_ptid;
3841 return 1;
3842 }
3843 else
3844 return 0;
3845 }
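
/* For illustration only (a simplified sketch, not part of the
   original code): the step-over lifecycle driven from linux_resume
   and the wait code looks roughly like

     if (start_step_over (lwp))     .. stop all, uninsert, single-step
       wait for the event from STEP_OVER_BKPT's LWP;
     if (finish_step_over (lwp))    .. reinsert the breakpoint
       proceed_all_lwps ();         .. set threads running again

   The exact sequencing is handled by the wait code (linux_wait_1).  */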
3846
3847 /* This function is called once per thread. We check the thread's resume
3848 request, which will tell us whether to resume, step, or leave the thread
3849 stopped; and what signal, if any, it should be sent.
3850
3851 For threads which we aren't explicitly told otherwise, we preserve
3852 the stepping flag; this is used for stepping over gdbserver-placed
3853 breakpoints.
3854
3855 If pending_flags was set in any thread, we queue any needed
3856 signals, since we won't actually resume. We already have a pending
3857 event to report, so we don't need to preserve any step requests;
3858 they should be re-issued if necessary. */
3859
3860 static int
3861 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3862 {
3863 struct thread_info *thread = (struct thread_info *) entry;
3864 struct lwp_info *lwp = get_thread_lwp (thread);
3865 int step;
3866 int leave_all_stopped = * (int *) arg;
3867 int leave_pending;
3868
3869 if (lwp->resume == NULL)
3870 return 0;
3871
3872 if (lwp->resume->kind == resume_stop)
3873 {
3874 if (debug_threads)
3875 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3876
3877 if (!lwp->stopped)
3878 {
3879 if (debug_threads)
3880 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3881
3882 /* Stop the thread, and wait for the event asynchronously,
3883 through the event loop. */
3884 send_sigstop (lwp);
3885 }
3886 else
3887 {
3888 if (debug_threads)
3889 debug_printf ("already stopped LWP %ld\n",
3890 lwpid_of (thread));
3891
3892 /* The LWP may have been stopped in an internal event that
3893 was not meant to be notified back to GDB (e.g., gdbserver
3894 breakpoint), so we should be reporting a stop event in
3895 this case too. */
3896
3897 /* If the thread already has a pending SIGSTOP, this is a
3898 no-op. Otherwise, something later will presumably resume
3899 the thread and this will cause it to cancel any pending
3900 operation, due to last_resume_kind == resume_stop. If
3901 the thread already has a pending status to report, we
3902 will still report it the next time we wait - see
3903 status_pending_p_callback. */
3904
3905 /* If we already have a pending signal to report, then
3906 there's no need to queue a SIGSTOP, as this means we're
3907 midway through moving the LWP out of the jumppad, and we
3908 will report the pending signal as soon as that is
3909 finished. */
3910 if (lwp->pending_signals_to_report == NULL)
3911 send_sigstop (lwp);
3912 }
3913
3914 /* For stop requests, we're done. */
3915 lwp->resume = NULL;
3916 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3917 return 0;
3918 }
3919
3920 /* If this thread which is about to be resumed has a pending status,
3921 then don't resume any threads - we can just report the pending
3922 status. Make sure to queue any signals that would otherwise be
3923 sent.  In all-stop mode, we make this decision based on whether
3924 *any* thread has a pending status.  If there's a thread that needs the
3925 step-over-breakpoint dance, then don't resume any other thread
3926 but that particular one. */
3927 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3928
3929 if (!leave_pending)
3930 {
3931 if (debug_threads)
3932 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3933
3934 step = (lwp->resume->kind == resume_step);
3935 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3936 }
3937 else
3938 {
3939 if (debug_threads)
3940 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3941
3942 /* If we have a new signal, enqueue the signal. */
3943 if (lwp->resume->sig != 0)
3944 {
3945 struct pending_signals *p_sig;
3946 p_sig = xmalloc (sizeof (*p_sig));
3947 p_sig->prev = lwp->pending_signals;
3948 p_sig->signal = lwp->resume->sig;
3949 memset (&p_sig->info, 0, sizeof (siginfo_t));
3950
3951 /* If this is the same signal we were previously stopped by,
3952 make sure to queue its siginfo. We can ignore the return
3953 value of ptrace; if it fails, we'll skip
3954 PTRACE_SETSIGINFO. */
3955 if (WIFSTOPPED (lwp->last_status)
3956 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3957 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3958 &p_sig->info);
3959
3960 lwp->pending_signals = p_sig;
3961 }
3962 }
3963
3964 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3965 lwp->resume = NULL;
3966 return 0;
3967 }
3968
3969 static void
3970 linux_resume (struct thread_resume *resume_info, size_t n)
3971 {
3972 struct thread_resume_array array = { resume_info, n };
3973 struct thread_info *need_step_over = NULL;
3974 int any_pending;
3975 int leave_all_stopped;
3976
3977 if (debug_threads)
3978 {
3979 debug_enter ();
3980 debug_printf ("linux_resume:\n");
3981 }
3982
3983 find_inferior (&all_threads, linux_set_resume_request, &array);
3984
3985 /* If there is a thread which would otherwise be resumed, which has
3986 a pending status, then don't resume any threads - we can just
3987 report the pending status. Make sure to queue any signals that
3988 would otherwise be sent. In non-stop mode, we'll apply this
3989 logic to each thread individually. We consume all pending events
3990 before considering to start a step-over (in all-stop). */
3991 any_pending = 0;
3992 if (!non_stop)
3993 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
3994
3995 /* If there is a thread which would otherwise be resumed, which is
3996 stopped at a breakpoint that needs stepping over, then don't
3997 resume any threads - have it step over the breakpoint with all
3998 other threads stopped, then resume all threads again. Make sure
3999 to queue any signals that would otherwise be delivered or
4000 queued. */
4001 if (!any_pending && supports_breakpoints ())
4002 need_step_over
4003 = (struct thread_info *) find_inferior (&all_threads,
4004 need_step_over_p, NULL);
4005
4006 leave_all_stopped = (need_step_over != NULL || any_pending);
4007
4008 if (debug_threads)
4009 {
4010 if (need_step_over != NULL)
4011 debug_printf ("Not resuming all, need step over\n");
4012 else if (any_pending)
4013 debug_printf ("Not resuming, all-stop and found "
4014 "an LWP with pending status\n");
4015 else
4016 debug_printf ("Resuming, no pending status or step over needed\n");
4017 }
4018
4019 /* Even if we're leaving threads stopped, queue all signals we'd
4020 otherwise deliver. */
4021 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4022
4023 if (need_step_over)
4024 start_step_over (get_thread_lwp (need_step_over));
4025
4026 if (debug_threads)
4027 {
4028 debug_printf ("linux_resume done\n");
4029 debug_exit ();
4030 }
4031 }
4032
4033 /* This function is called once per thread. We check the thread's
4034 last resume request, which will tell us whether to resume, step, or
4035 leave the thread stopped. Any signal the client requested to be
4036 delivered has already been enqueued at this point.
4037
4038 If any thread that GDB wants running is stopped at an internal
4039 breakpoint that needs stepping over, we start a step-over operation
4040 on that particular thread, and leave all others stopped. */
4041
4042 static int
4043 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4044 {
4045 struct thread_info *thread = (struct thread_info *) entry;
4046 struct lwp_info *lwp = get_thread_lwp (thread);
4047 int step;
4048
4049 if (lwp == except)
4050 return 0;
4051
4052 if (debug_threads)
4053 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4054
4055 if (!lwp->stopped)
4056 {
4057 if (debug_threads)
4058 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4059 return 0;
4060 }
4061
4062 if (thread->last_resume_kind == resume_stop
4063 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4064 {
4065 if (debug_threads)
4066 debug_printf (" client wants LWP to remain %ld stopped\n",
4067 lwpid_of (thread));
4068 return 0;
4069 }
4070
4071 if (lwp->status_pending_p)
4072 {
4073 if (debug_threads)
4074 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4075 lwpid_of (thread));
4076 return 0;
4077 }
4078
4079 gdb_assert (lwp->suspended >= 0);
4080
4081 if (lwp->suspended)
4082 {
4083 if (debug_threads)
4084 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4085 return 0;
4086 }
4087
4088 if (thread->last_resume_kind == resume_stop
4089 && lwp->pending_signals_to_report == NULL
4090 && lwp->collecting_fast_tracepoint == 0)
4091 {
4092 /* We haven't reported this LWP as stopped yet (otherwise, the
4093 last_status.kind check above would catch it, and we wouldn't
4094 reach here).  This LWP may have been momentarily paused by a
4095 stop_all_lwps call while handling, for example, another LWP's
4096 step-over. In that case, the pending expected SIGSTOP signal
4097 that was queued at vCont;t handling time will have already
4098 been consumed by wait_for_sigstop, and so we need to requeue
4099 another one here. Note that if the LWP already has a SIGSTOP
4100 pending, this is a no-op. */
4101
4102 if (debug_threads)
4103 debug_printf ("Client wants LWP %ld to stop. "
4104 "Making sure it has a SIGSTOP pending\n",
4105 lwpid_of (thread));
4106
4107 send_sigstop (lwp);
4108 }
4109
4110 step = thread->last_resume_kind == resume_step;
4111 linux_resume_one_lwp (lwp, step, 0, NULL);
4112 return 0;
4113 }
4114
4115 static int
4116 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4117 {
4118 struct thread_info *thread = (struct thread_info *) entry;
4119 struct lwp_info *lwp = get_thread_lwp (thread);
4120
4121 if (lwp == except)
4122 return 0;
4123
4124 lwp->suspended--;
4125 gdb_assert (lwp->suspended >= 0);
4126
4127 return proceed_one_lwp (entry, except);
4128 }
4129
4130 /* When we finish a step-over, set threads running again. If there's
4131 another thread that may need a step-over, now's the time to start
4132 it. Eventually, we'll move all threads past their breakpoints. */
4133
4134 static void
4135 proceed_all_lwps (void)
4136 {
4137 struct thread_info *need_step_over;
4138
4139 /* If there is a thread which would otherwise be resumed, which is
4140 stopped at a breakpoint that needs stepping over, then don't
4141 resume any threads - have it step over the breakpoint with all
4142 other threads stopped, then resume all threads again. */
4143
4144 if (supports_breakpoints ())
4145 {
4146 need_step_over
4147 = (struct thread_info *) find_inferior (&all_threads,
4148 need_step_over_p, NULL);
4149
4150 if (need_step_over != NULL)
4151 {
4152 if (debug_threads)
4153 debug_printf ("proceed_all_lwps: found "
4154 "thread %ld needing a step-over\n",
4155 lwpid_of (need_step_over));
4156
4157 start_step_over (get_thread_lwp (need_step_over));
4158 return;
4159 }
4160 }
4161
4162 if (debug_threads)
4163 debug_printf ("Proceeding, no step-over needed\n");
4164
4165 find_inferior (&all_threads, proceed_one_lwp, NULL);
4166 }
4167
4168 /* Stopped LWPs that the client wanted to be running, that don't have
4169 pending statuses, are set to run again, except for EXCEPT, if not
4170 NULL. This undoes a stop_all_lwps call. */
4171
4172 static void
4173 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4174 {
4175 if (debug_threads)
4176 {
4177 debug_enter ();
4178 if (except)
4179 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4180 lwpid_of (get_lwp_thread (except)));
4181 else
4182 debug_printf ("unstopping all lwps\n");
4183 }
4184
4185 if (unsuspend)
4186 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4187 else
4188 find_inferior (&all_threads, proceed_one_lwp, except);
4189
4190 if (debug_threads)
4191 {
4192 debug_printf ("unstop_all_lwps done\n");
4193 debug_exit ();
4194 }
4195 }
4196
4197
4198 #ifdef HAVE_LINUX_REGSETS
4199
4200 #define use_linux_regsets 1
4201
4202 /* Returns true if REGSET has been disabled. */
4203
4204 static int
4205 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4206 {
4207 return (info->disabled_regsets != NULL
4208 && info->disabled_regsets[regset - info->regsets]);
4209 }
4210
4211 /* Disable REGSET. */
4212
4213 static void
4214 disable_regset (struct regsets_info *info, struct regset_info *regset)
4215 {
4216 int dr_offset;
4217
4218 dr_offset = regset - info->regsets;
4219 if (info->disabled_regsets == NULL)
4220 info->disabled_regsets = xcalloc (1, info->num_regsets);
4221 info->disabled_regsets[dr_offset] = 1;
4222 }
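
/* For illustration only (hypothetical target, not part of the
   original code): the regsets arrays scanned by the loops below are
   terminated by an entry with a negative size.  Assuming the field
   order of struct regset_info in linux-low.h, such an array looks
   like:

     static struct regset_info example_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
         GENERAL_REGS, example_fill_gregset, example_store_gregset },
       { 0, 0, 0, -1, -1, NULL, NULL }
     };

   where the final entry is the size < 0 sentinel, and the two
   example_* callbacks are hypothetical fill/store functions.  */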
4223
4224 static int
4225 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4226 struct regcache *regcache)
4227 {
4228 struct regset_info *regset;
4229 int saw_general_regs = 0;
4230 int pid;
4231 struct iovec iov;
4232
4233 regset = regsets_info->regsets;
4234
4235 pid = lwpid_of (current_inferior);
4236 while (regset->size >= 0)
4237 {
4238 void *buf, *data;
4239 int nt_type, res;
4240
4241 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4242 {
4243 regset ++;
4244 continue;
4245 }
4246
4247 buf = xmalloc (regset->size);
4248
4249 nt_type = regset->nt_type;
4250 if (nt_type)
4251 {
4252 iov.iov_base = buf;
4253 iov.iov_len = regset->size;
4254 data = (void *) &iov;
4255 }
4256 else
4257 data = buf;
4258
4259 #ifndef __sparc__
4260 res = ptrace (regset->get_request, pid,
4261 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4262 #else
4263 res = ptrace (regset->get_request, pid, data, nt_type);
4264 #endif
4265 if (res < 0)
4266 {
4267 if (errno == EIO)
4268 {
4269 /* If we get EIO on a regset, do not try it again for
4270 this process mode. */
4271 disable_regset (regsets_info, regset);
4272 free (buf);
4273 continue;
4274 }
4275 else
4276 {
4277 char s[256];
4278 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4279 pid);
4280 perror (s);
     /* BUF holds no valid register contents at this point, so do
        not fall through and store it into the regcache.  */
     free (buf);
     regset ++;
     continue;
4281 }
4282 }
4283 else if (regset->type == GENERAL_REGS)
4284 saw_general_regs = 1;
4285 regset->store_function (regcache, buf);
4286 regset ++;
4287 free (buf);
4288 }
4289 if (saw_general_regs)
4290 return 0;
4291 else
4292 return 1;
4293 }
4294
4295 static int
4296 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4297 struct regcache *regcache)
4298 {
4299 struct regset_info *regset;
4300 int saw_general_regs = 0;
4301 int pid;
4302 struct iovec iov;
4303
4304 regset = regsets_info->regsets;
4305
4306 pid = lwpid_of (current_inferior);
4307 while (regset->size >= 0)
4308 {
4309 void *buf, *data;
4310 int nt_type, res;
4311
4312 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4313 {
4314 regset ++;
4315 continue;
4316 }
4317
4318 buf = xmalloc (regset->size);
4319
4320 /* First fill the buffer with the current register set contents,
4321 in case there are any items in the kernel's regset that are
4322 not in gdbserver's regcache. */
4323
4324 nt_type = regset->nt_type;
4325 if (nt_type)
4326 {
4327 iov.iov_base = buf;
4328 iov.iov_len = regset->size;
4329 data = (void *) &iov;
4330 }
4331 else
4332 data = buf;
4333
4334 #ifndef __sparc__
4335 res = ptrace (regset->get_request, pid,
4336 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4337 #else
4338 res = ptrace (regset->get_request, pid, data, nt_type);
4339 #endif
4340
4341 if (res == 0)
4342 {
4343 /* Then overlay our cached registers on that. */
4344 regset->fill_function (regcache, buf);
4345
4346 /* Only now do we write the register set. */
4347 #ifndef __sparc__
4348 res = ptrace (regset->set_request, pid,
4349 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4350 #else
4351 res = ptrace (regset->set_request, pid, data, nt_type);
4352 #endif
4353 }
4354
4355 if (res < 0)
4356 {
4357 if (errno == EIO)
4358 {
4359 /* If we get EIO on a regset, do not try it again for
4360 this process mode. */
4361 disable_regset (regsets_info, regset);
4362 free (buf);
4363 continue;
4364 }
4365 else if (errno == ESRCH)
4366 {
4367 /* At this point, ESRCH should mean the process is
4368 already gone, in which case we simply ignore attempts
4369 to change its registers. See also the related
4370 comment in linux_resume_one_lwp. */
4371 free (buf);
4372 return 0;
4373 }
4374 else
4375 {
4376 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4377 }
4378 }
4379 else if (regset->type == GENERAL_REGS)
4380 saw_general_regs = 1;
4381 regset ++;
4382 free (buf);
4383 }
4384 if (saw_general_regs)
4385 return 0;
4386 else
4387 return 1;
4388 }
4389
4390 #else /* !HAVE_LINUX_REGSETS */
4391
4392 #define use_linux_regsets 0
4393 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4394 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4395
4396 #endif
4397
4398 /* Return 1 if register REGNO is supported by one of the regset ptrace
4399 calls or 0 if it has to be transferred individually. */
4400
4401 static int
4402 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4403 {
4404 unsigned char mask = 1 << (regno % 8);
4405 size_t index = regno / 8;
4406
4407 return (use_linux_regsets
4408 && (regs_info->regset_bitmap == NULL
4409 || (regs_info->regset_bitmap[index] & mask) != 0));
4410 }
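
/* For example, for REGNO 10 the test above computes INDEX == 1 and
   MASK == 0x04, i.e. bit 2 of the second bitmap byte.  */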
4411
4412 #ifdef HAVE_LINUX_USRREGS
4413
4414 int
4415 register_addr (const struct usrregs_info *usrregs, int regnum)
4416 {
4417 int addr;
4418
4419 if (regnum < 0 || regnum >= usrregs->num_regs)
4420 error ("Invalid register number %d.", regnum);
4421
4422 addr = usrregs->regmap[regnum];
4423
4424 return addr;
4425 }
4426
4427 /* Fetch one register. */
4428 static void
4429 fetch_register (const struct usrregs_info *usrregs,
4430 struct regcache *regcache, int regno)
4431 {
4432 CORE_ADDR regaddr;
4433 int i, size;
4434 char *buf;
4435 int pid;
4436
4437 if (regno >= usrregs->num_regs)
4438 return;
4439 if ((*the_low_target.cannot_fetch_register) (regno))
4440 return;
4441
4442 regaddr = register_addr (usrregs, regno);
4443 if (regaddr == -1)
4444 return;
4445
4446 size = ((register_size (regcache->tdesc, regno)
4447 + sizeof (PTRACE_XFER_TYPE) - 1)
4448 & -sizeof (PTRACE_XFER_TYPE));
4449 buf = alloca (size);
4450
4451 pid = lwpid_of (current_inferior);
4452 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4453 {
4454 errno = 0;
4455 *(PTRACE_XFER_TYPE *) (buf + i) =
4456 ptrace (PTRACE_PEEKUSER, pid,
4457 /* Coerce to a uintptr_t first to avoid potential gcc warning
4458 of coercing an 8 byte integer to a 4 byte pointer. */
4459 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4460 regaddr += sizeof (PTRACE_XFER_TYPE);
4461 if (errno != 0)
4462 error ("reading register %d: %s", regno, strerror (errno));
4463 }
4464
4465 if (the_low_target.supply_ptrace_register)
4466 the_low_target.supply_ptrace_register (regcache, regno, buf);
4467 else
4468 supply_register (regcache, regno, buf);
4469 }
4470
4471 /* Store one register. */
4472 static void
4473 store_register (const struct usrregs_info *usrregs,
4474 struct regcache *regcache, int regno)
4475 {
4476 CORE_ADDR regaddr;
4477 int i, size;
4478 char *buf;
4479 int pid;
4480
4481 if (regno >= usrregs->num_regs)
4482 return;
4483 if ((*the_low_target.cannot_store_register) (regno))
4484 return;
4485
4486 regaddr = register_addr (usrregs, regno);
4487 if (regaddr == -1)
4488 return;
4489
4490 size = ((register_size (regcache->tdesc, regno)
4491 + sizeof (PTRACE_XFER_TYPE) - 1)
4492 & -sizeof (PTRACE_XFER_TYPE));
4493 buf = alloca (size);
4494 memset (buf, 0, size);
4495
4496 if (the_low_target.collect_ptrace_register)
4497 the_low_target.collect_ptrace_register (regcache, regno, buf);
4498 else
4499 collect_register (regcache, regno, buf);
4500
4501 pid = lwpid_of (current_inferior);
4502 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4503 {
4504 errno = 0;
4505 ptrace (PTRACE_POKEUSER, pid,
4506 /* Coerce to a uintptr_t first to avoid potential gcc warning
4507 about coercing an 8 byte integer to a 4 byte pointer. */
4508 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4509 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4510 if (errno != 0)
4511 {
4512 /* At this point, ESRCH should mean the process is
4513 already gone, in which case we simply ignore attempts
4514 to change its registers. See also the related
4515 comment in linux_resume_one_lwp. */
4516 if (errno == ESRCH)
4517 return;
4518
4519 if ((*the_low_target.cannot_store_register) (regno) == 0)
4520 error ("writing register %d: %s", regno, strerror (errno));
4521 }
4522 regaddr += sizeof (PTRACE_XFER_TYPE);
4523 }
4524 }
4525
4526 /* Fetch all registers, or just one, from the child process.
4527 If REGNO is -1, do this for all registers, skipping any that are
4528 assumed to have been retrieved by regsets_fetch_inferior_registers,
4529 unless ALL is non-zero.
4530 Otherwise, REGNO specifies which register (so we can save time). */
4531 static void
4532 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4533 struct regcache *regcache, int regno, int all)
4534 {
4535 struct usrregs_info *usr = regs_info->usrregs;
4536
4537 if (regno == -1)
4538 {
4539 for (regno = 0; regno < usr->num_regs; regno++)
4540 if (all || !linux_register_in_regsets (regs_info, regno))
4541 fetch_register (usr, regcache, regno);
4542 }
4543 else
4544 fetch_register (usr, regcache, regno);
4545 }
4546
4547 /* Store our register values back into the inferior.
4548 If REGNO is -1, do this for all registers, skipping any that are
4549 assumed to have been saved by regsets_store_inferior_registers,
4550 unless ALL is non-zero.
4551 Otherwise, REGNO specifies which register (so we can save time). */
4552 static void
4553 usr_store_inferior_registers (const struct regs_info *regs_info,
4554 struct regcache *regcache, int regno, int all)
4555 {
4556 struct usrregs_info *usr = regs_info->usrregs;
4557
4558 if (regno == -1)
4559 {
4560 for (regno = 0; regno < usr->num_regs; regno++)
4561 if (all || !linux_register_in_regsets (regs_info, regno))
4562 store_register (usr, regcache, regno);
4563 }
4564 else
4565 store_register (usr, regcache, regno);
4566 }
4567
4568 #else /* !HAVE_LINUX_USRREGS */
4569
4570 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4571 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4572
4573 #endif
4574
4575
4576 void
4577 linux_fetch_registers (struct regcache *regcache, int regno)
4578 {
4579 int use_regsets;
4580 int all = 0;
4581 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4582
4583 if (regno == -1)
4584 {
4585 if (the_low_target.fetch_register != NULL
4586 && regs_info->usrregs != NULL)
4587 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4588 (*the_low_target.fetch_register) (regcache, regno);
4589
4590 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4591 if (regs_info->usrregs != NULL)
4592 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4593 }
4594 else
4595 {
4596 if (the_low_target.fetch_register != NULL
4597 && (*the_low_target.fetch_register) (regcache, regno))
4598 return;
4599
4600 use_regsets = linux_register_in_regsets (regs_info, regno);
4601 if (use_regsets)
4602 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4603 regcache);
4604 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4605 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4606 }
4607 }
4608
4609 void
4610 linux_store_registers (struct regcache *regcache, int regno)
4611 {
4612 int use_regsets;
4613 int all = 0;
4614 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4615
4616 if (regno == -1)
4617 {
4618 all = regsets_store_inferior_registers (regs_info->regsets_info,
4619 regcache);
4620 if (regs_info->usrregs != NULL)
4621 usr_store_inferior_registers (regs_info, regcache, regno, all);
4622 }
4623 else
4624 {
4625 use_regsets = linux_register_in_regsets (regs_info, regno);
4626 if (use_regsets)
4627 all = regsets_store_inferior_registers (regs_info->regsets_info,
4628 regcache);
4629 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4630 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4631 }
4632 }
4633
4634
4635 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4636 to debugger memory starting at MYADDR. */
4637
4638 static int
4639 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4640 {
4641 int pid = lwpid_of (current_inferior);
4642 register PTRACE_XFER_TYPE *buffer;
4643 register CORE_ADDR addr;
4644 register int count;
4645 char filename[64];
4646 register int i;
4647 int ret;
4648 int fd;
4649
4650 /* Try using /proc. Don't bother for one word. */
4651 if (len >= 3 * sizeof (long))
4652 {
4653 int bytes;
4654
4655 /* We could keep this file open and cache it - possibly one per
4656 thread. That requires some juggling, but is even faster. */
4657 sprintf (filename, "/proc/%d/mem", pid);
4658 fd = open (filename, O_RDONLY | O_LARGEFILE);
4659 if (fd == -1)
4660 goto no_proc;
4661
4662 /* If pread64 is available, use it. It's faster if the kernel
4663 supports it (only one syscall), and it's 64-bit safe even on
4664 32-bit platforms (for instance, SPARC debugging a SPARC64
4665 application). */
4666 #ifdef HAVE_PREAD64
4667 bytes = pread64 (fd, myaddr, len, memaddr);
4668 #else
4669 bytes = -1;
4670 if (lseek (fd, memaddr, SEEK_SET) != -1)
4671 bytes = read (fd, myaddr, len);
4672 #endif
4673
4674 close (fd);
4675 if (bytes == len)
4676 return 0;
4677
4678 /* Some data was read; we'll try to get the rest with ptrace. */
4679 if (bytes > 0)
4680 {
4681 memaddr += bytes;
4682 myaddr += bytes;
4683 len -= bytes;
4684 }
4685 }
4686
4687 no_proc:
4688 /* Round starting address down to longword boundary. */
4689 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4690 /* Round ending address up; get number of longwords that makes. */
4691 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4692 / sizeof (PTRACE_XFER_TYPE));
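  /* E.g., MEMADDR == 0x1003 and LEN == 10 with an 8-byte
     PTRACE_XFER_TYPE yield ADDR == 0x1000 and COUNT == 2; the two
     longwords at 0x1000 and 0x1008 cover the requested range.  */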
4693 /* Allocate buffer of that many longwords. */
4694 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4695
4696 /* Read all the longwords */
4697 errno = 0;
4698 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4699 {
4700 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4701 about coercing an 8 byte integer to a 4 byte pointer. */
4702 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4703 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4704 (PTRACE_TYPE_ARG4) 0);
4705 if (errno)
4706 break;
4707 }
4708 ret = errno;
4709
4710 /* Copy appropriate bytes out of the buffer. */
4711 if (i > 0)
4712 {
4713 i *= sizeof (PTRACE_XFER_TYPE);
4714 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4715 memcpy (myaddr,
4716 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4717 i < len ? i : len);
4718 }
4719
4720 return ret;
4721 }
4722
4723 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4724 memory at MEMADDR. On failure (cannot write to the inferior)
4725 returns the value of errno. Always succeeds if LEN is zero. */
4726
4727 static int
4728 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4729 {
4730 register int i;
4731 /* Round starting address down to longword boundary. */
4732 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4733 /* Round ending address up; get number of longwords that makes. */
4734 register int count
4735 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4736 / sizeof (PTRACE_XFER_TYPE);
4737
4738 /* Allocate buffer of that many longwords. */
4739 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4740 alloca (count * sizeof (PTRACE_XFER_TYPE));
4741
4742 int pid = lwpid_of (current_inferior);
4743
4744 if (len == 0)
4745 {
4746 /* Zero length write always succeeds. */
4747 return 0;
4748 }
4749
4750 if (debug_threads)
4751 {
4752 /* Dump up to four bytes. */
4753 unsigned int val = * (unsigned int *) myaddr;
4754 if (len == 1)
4755 val = val & 0xff;
4756 else if (len == 2)
4757 val = val & 0xffff;
4758 else if (len == 3)
4759 val = val & 0xffffff;
4760 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4761 val, (long)memaddr);
4762 }
4763
4764 /* Fill start and end extra bytes of buffer with existing memory data. */
4765
4766 errno = 0;
4767 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4768 about coercing an 8 byte integer to a 4 byte pointer. */
4769 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4770 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4771 (PTRACE_TYPE_ARG4) 0);
4772 if (errno)
4773 return errno;
4774
4775 if (count > 1)
4776 {
4777 errno = 0;
4778 buffer[count - 1]
4779 = ptrace (PTRACE_PEEKTEXT, pid,
4780 /* Coerce to a uintptr_t first to avoid potential gcc warning
4781 about coercing an 8 byte integer to a 4 byte pointer. */
4782 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4783 * sizeof (PTRACE_XFER_TYPE)),
4784 (PTRACE_TYPE_ARG4) 0);
4785 if (errno)
4786 return errno;
4787 }
4788
4789 /* Copy data to be written over corresponding part of buffer. */
4790
4791 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4792 myaddr, len);
4793
4794 /* Write the entire buffer. */
4795
4796 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4797 {
4798 errno = 0;
4799 ptrace (PTRACE_POKETEXT, pid,
4800 /* Coerce to a uintptr_t first to avoid potential gcc warning
4801 about coercing an 8 byte integer to a 4 byte pointer. */
4802 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4803 (PTRACE_TYPE_ARG4) buffer[i]);
4804 if (errno)
4805 return errno;
4806 }
4807
4808 return 0;
4809 }
4810
4811 static void
4812 linux_look_up_symbols (void)
4813 {
4814 #ifdef USE_THREAD_DB
4815 struct process_info *proc = current_process ();
4816
4817 if (proc->private->thread_db != NULL)
4818 return;
4819
4820 /* If the kernel supports tracing clones, then we don't need to
4821 use the magic thread event breakpoint to learn about
4822 threads. */
4823 thread_db_init (!linux_supports_traceclone ());
4824 #endif
4825 }
4826
4827 static void
4828 linux_request_interrupt (void)
4829 {
4830 extern unsigned long signal_pid;
4831
4832 if (!ptid_equal (cont_thread, null_ptid)
4833 && !ptid_equal (cont_thread, minus_one_ptid))
4834 {
4835 int lwpid;
4836
4837 lwpid = lwpid_of (current_inferior);
4838 kill_lwp (lwpid, SIGINT);
4839 }
4840 else
4841 kill_lwp (signal_pid, SIGINT);
4842 }
4843
4844 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4845 to debugger memory starting at MYADDR. */
4846
4847 static int
4848 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4849 {
4850 char filename[PATH_MAX];
4851 int fd, n;
4852 int pid = lwpid_of (current_inferior);
4853
4854 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4855
4856 fd = open (filename, O_RDONLY);
4857 if (fd < 0)
4858 return -1;
4859
4860 if (offset != (CORE_ADDR) 0
4861 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4862 n = -1;
4863 else
4864 n = read (fd, myaddr, len);
4865
4866 close (fd);
4867
4868 return n;
4869 }
4870
4871 /* These breakpoint and watchpoint related wrapper functions simply
4872 pass on the function call if the target has registered a
4873 corresponding function. */
4874
4875 static int
4876 linux_supports_z_point_type (char z_type)
4877 {
4878 return (the_low_target.supports_z_point_type != NULL
4879 && the_low_target.supports_z_point_type (z_type));
4880 }
4881
4882 static int
4883 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4884 int size, struct raw_breakpoint *bp)
4885 {
4886 if (the_low_target.insert_point != NULL)
4887 return the_low_target.insert_point (type, addr, size, bp);
4888 else
4889 /* Unsupported (see target.h). */
4890 return 1;
4891 }
4892
4893 static int
4894 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4895 int size, struct raw_breakpoint *bp)
4896 {
4897 if (the_low_target.remove_point != NULL)
4898 return the_low_target.remove_point (type, addr, size, bp);
4899 else
4900 /* Unsupported (see target.h). */
4901 return 1;
4902 }
4903
4904 static int
4905 linux_stopped_by_watchpoint (void)
4906 {
4907 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4908
4909 return lwp->stopped_by_watchpoint;
4910 }
4911
4912 static CORE_ADDR
4913 linux_stopped_data_address (void)
4914 {
4915 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4916
4917 return lwp->stopped_data_address;
4918 }
4919
4920 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4921 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4922 && defined(PT_TEXT_END_ADDR)
4923
4924 /* This is only used for targets that define PT_TEXT_ADDR,
4925 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4926 the target has different ways of acquiring this information, like
4927 loadmaps. */
4928
4929 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4930 to tell gdb about. */
4931
4932 static int
4933 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4934 {
4935 unsigned long text, text_end, data;
4936 int pid = lwpid_of (get_thread_lwp (current_inferior));
4937
4938 errno = 0;
4939
4940 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4941 (PTRACE_TYPE_ARG4) 0);
4942 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4943 (PTRACE_TYPE_ARG4) 0);
4944 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4945 (PTRACE_TYPE_ARG4) 0);
4946
4947 if (errno == 0)
4948 {
4949 /* Both text and data offsets produced at compile-time (and so
4950 used by gdb) are relative to the beginning of the program,
4951 with the data segment immediately following the text segment.
4952 However, the actual runtime layout in memory may put the data
4953 somewhere else, so when we send gdb a data base-address, we
4954 use the real data base address and subtract the compile-time
4955 data base-address from it (which is just the length of the
4956 text segment). BSS immediately follows data in both
4957 cases. */
4958 *text_p = text;
4959 *data_p = data - (text_end - text);
4960
4961 return 1;
4962 }
4963 return 0;
4964 }
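
/* For example (hypothetical values): if PT_TEXT_ADDR reads back
   0x100000, PT_TEXT_END_ADDR 0x110000 and PT_DATA_ADDR 0x200000,
   then *TEXT_P is 0x100000 and *DATA_P is 0x200000 - 0x10000
   == 0x1f0000, matching the compile-time assumption that data
   immediately follows text.  */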
4965 #endif
4966
4967 static int
4968 linux_qxfer_osdata (const char *annex,
4969 unsigned char *readbuf, unsigned const char *writebuf,
4970 CORE_ADDR offset, int len)
4971 {
4972 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4973 }
4974
4975 /* Convert a native/host siginfo object, into/from the siginfo in the
4976 layout of the inferior's architecture.  DIRECTION of 1 converts
     INF_SIGINFO into SIGINFO; DIRECTION of 0 converts SIGINFO into
     INF_SIGINFO.  */
4977
4978 static void
4979 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4980 {
4981 int done = 0;
4982
4983 if (the_low_target.siginfo_fixup != NULL)
4984 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4985
4986 /* If there was no callback, or the callback didn't do anything,
4987 then just do a straight memcpy. */
4988 if (!done)
4989 {
4990 if (direction == 1)
4991 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4992 else
4993 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4994 }
4995 }
4996
4997 static int
4998 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4999 unsigned const char *writebuf, CORE_ADDR offset, int len)
5000 {
5001 int pid;
5002 siginfo_t siginfo;
5003 char inf_siginfo[sizeof (siginfo_t)];
5004
5005 if (current_inferior == NULL)
5006 return -1;
5007
5008 pid = lwpid_of (current_inferior);
5009
5010 if (debug_threads)
5011 debug_printf ("%s siginfo for lwp %d.\n",
5012 readbuf != NULL ? "Reading" : "Writing",
5013 pid);
5014
5015 if (offset >= sizeof (siginfo))
5016 return -1;
5017
5018 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5019 return -1;
5020
5021 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5022 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5023 inferior with a 64-bit GDBSERVER should look the same as debugging it
5024 with a 32-bit GDBSERVER, we need to convert it. */
5025 siginfo_fixup (&siginfo, inf_siginfo, 0);
5026
5027 if (offset + len > sizeof (siginfo))
5028 len = sizeof (siginfo) - offset;
5029
5030 if (readbuf != NULL)
5031 memcpy (readbuf, inf_siginfo + offset, len);
5032 else
5033 {
5034 memcpy (inf_siginfo + offset, writebuf, len);
5035
5036 /* Convert back to ptrace layout before flushing it out. */
5037 siginfo_fixup (&siginfo, inf_siginfo, 1);
5038
5039 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5040 return -1;
5041 }
5042
5043 return len;
5044 }
5045
5046 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5047 it lets us notice when children change state; and it acts as the
5048 handler for the sigsuspend in my_waitpid. */
5049
5050 static void
5051 sigchld_handler (int signo)
5052 {
5053 int old_errno = errno;
5054
5055 if (debug_threads)
5056 {
5057 do
5058 {
5059 /* fprintf is not async-signal-safe, so call write
5060 directly. */
5061 if (write (2, "sigchld_handler\n",
5062 sizeof ("sigchld_handler\n") - 1) < 0)
5063 break; /* just ignore */
5064 } while (0);
5065 }
5066
5067 if (target_is_async_p ())
5068 async_file_mark (); /* trigger a linux_wait */
5069
5070 errno = old_errno;
5071 }
5072
5073 static int
5074 linux_supports_non_stop (void)
5075 {
5076 return 1;
5077 }
5078
5079 static int
5080 linux_async (int enable)
5081 {
5082 int previous = (linux_event_pipe[0] != -1);
5083
5084 if (debug_threads)
5085 debug_printf ("linux_async (%d), previous=%d\n",
5086 enable, previous);
5087
5088 if (previous != enable)
5089 {
5090 sigset_t mask;
5091 sigemptyset (&mask);
5092 sigaddset (&mask, SIGCHLD);
5093
5094 sigprocmask (SIG_BLOCK, &mask, NULL);
5095
5096 if (enable)
5097 {
5098 if (pipe (linux_event_pipe) == -1)
5099 fatal ("creating event pipe failed.");
5100
5101 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5102 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5103
5104 /* Register the event loop handler. */
5105 add_file_handler (linux_event_pipe[0],
5106 handle_target_event, NULL);
5107
5108 /* Always trigger a linux_wait. */
5109 async_file_mark ();
5110 }
5111 else
5112 {
5113 delete_file_handler (linux_event_pipe[0]);
5114
5115 close (linux_event_pipe[0]);
5116 close (linux_event_pipe[1]);
5117 linux_event_pipe[0] = -1;
5118 linux_event_pipe[1] = -1;
5119 }
5120
5121 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5122 }
5123
5124 return previous;
5125 }
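
/* For illustration only (a simplified sketch, not part of the
   original code): async_file_mark is assumed to make
   linux_event_pipe[0] readable by writing a byte into the write end,
   roughly:

     static void
     example_async_file_mark (void)
     {
       if (write (linux_event_pipe[1], "+", 1) < 0)
         ;
     }

   Write errors are ignorable: a full pipe already means "marked".
   The read handler registered above (handle_target_event) then
   triggers a linux_wait.  */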
5126
5127 static int
5128 linux_start_non_stop (int nonstop)
5129 {
5130 /* Register or unregister from event-loop accordingly. */
5131 linux_async (nonstop);
5132 return 0;
5133 }
5134
5135 static int
5136 linux_supports_multi_process (void)
5137 {
5138 return 1;
5139 }
5140
5141 static int
5142 linux_supports_disable_randomization (void)
5143 {
5144 #ifdef HAVE_PERSONALITY
5145 return 1;
5146 #else
5147 return 0;
5148 #endif
5149 }
5150
5151 static int
5152 linux_supports_agent (void)
5153 {
5154 return 1;
5155 }
5156
5157 static int
5158 linux_supports_range_stepping (void)
5159 {
5160 if (*the_low_target.supports_range_stepping == NULL)
5161 return 0;
5162
5163 return (*the_low_target.supports_range_stepping) ();
5164 }
5165
5166 /* Enumerate spufs IDs for process PID. */
5167 static int
5168 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5169 {
5170 int pos = 0;
5171 int written = 0;
5172 char path[128];
5173 DIR *dir;
5174 struct dirent *entry;
5175
5176 sprintf (path, "/proc/%ld/fd", pid);
5177 dir = opendir (path);
5178 if (!dir)
5179 return -1;
5180
5181 rewinddir (dir);
5182 while ((entry = readdir (dir)) != NULL)
5183 {
5184 struct stat st;
5185 struct statfs stfs;
5186 int fd;
5187
5188 fd = atoi (entry->d_name);
5189 if (!fd)
5190 continue;
5191
5192 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5193 if (stat (path, &st) != 0)
5194 continue;
5195 if (!S_ISDIR (st.st_mode))
5196 continue;
5197
5198 if (statfs (path, &stfs) != 0)
5199 continue;
5200 if (stfs.f_type != SPUFS_MAGIC)
5201 continue;
5202
5203 if (pos >= offset && pos + 4 <= offset + len)
5204 {
5205 *(unsigned int *)(buf + pos - offset) = fd;
5206 written += 4;
5207 }
5208 pos += 4;
5209 }
5210
5211 closedir (dir);
5212 return written;
5213 }
5214
5215 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5216 object type, using the /proc file system. */
5217 static int
5218 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5219 unsigned const char *writebuf,
5220 CORE_ADDR offset, int len)
5221 {
5222 long pid = lwpid_of (current_inferior);
5223 char buf[128];
5224 int fd = 0;
5225 int ret = 0;
5226
5227 if (!writebuf && !readbuf)
5228 return -1;
5229
5230 if (!*annex)
5231 {
5232 if (!readbuf)
5233 return -1;
5234 else
5235 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5236 }
5237
5238 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5239 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5240 if (fd <= 0)
5241 return -1;
5242
5243 if (offset != 0
5244 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5245 {
5246 close (fd);
5247 return 0;
5248 }
5249
5250 if (writebuf)
5251 ret = write (fd, writebuf, (size_t) len);
5252 else
5253 ret = read (fd, readbuf, (size_t) len);
5254
5255 close (fd);
5256 return ret;
5257 }
5258
5259 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5260 struct target_loadseg
5261 {
5262 /* Core address to which the segment is mapped. */
5263 Elf32_Addr addr;
5264 /* VMA recorded in the program header. */
5265 Elf32_Addr p_vaddr;
5266 /* Size of this segment in memory. */
5267 Elf32_Word p_memsz;
5268 };
5269
5270 # if defined PT_GETDSBT
5271 struct target_loadmap
5272 {
5273 /* Protocol version number, must be zero. */
5274 Elf32_Word version;
5275 /* Pointer to the DSBT table, its size, and the DSBT index. */
5276 unsigned *dsbt_table;
5277 unsigned dsbt_size, dsbt_index;
5278 /* Number of segments in this map. */
5279 Elf32_Word nsegs;
5280 /* The actual memory map. */
5281 struct target_loadseg segs[/*nsegs*/];
5282 };
5283 # define LINUX_LOADMAP PT_GETDSBT
5284 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5285 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5286 # else
5287 struct target_loadmap
5288 {
5289 /* Protocol version number, must be zero. */
5290 Elf32_Half version;
5291 /* Number of segments in this map. */
5292 Elf32_Half nsegs;
5293 /* The actual memory map. */
5294 struct target_loadseg segs[/*nsegs*/];
5295 };
5296 # define LINUX_LOADMAP PTRACE_GETFDPIC
5297 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5298 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5299 # endif
5300
5301 static int
5302 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5303 unsigned char *myaddr, unsigned int len)
5304 {
5305 int pid = lwpid_of (current_inferior);
5306 int addr = -1;
5307 struct target_loadmap *data = NULL;
5308 unsigned int actual_length, copy_length;
5309
5310 if (strcmp (annex, "exec") == 0)
5311 addr = (int) LINUX_LOADMAP_EXEC;
5312 else if (strcmp (annex, "interp") == 0)
5313 addr = (int) LINUX_LOADMAP_INTERP;
5314 else
5315 return -1;
5316
5317 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5318 return -1;
5319
5320 if (data == NULL)
5321 return -1;
5322
5323 actual_length = sizeof (struct target_loadmap)
5324 + sizeof (struct target_loadseg) * data->nsegs;
5325
5326 if (offset < 0 || offset > actual_length)
5327 return -1;
5328
5329 copy_length = actual_length - offset < len ? actual_length - offset : len;
5330 memcpy (myaddr, (char *) data + offset, copy_length);
5331 return copy_length;
5332 }
5333 #else
5334 # define linux_read_loadmap NULL
5335 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5336
5337 static void
5338 linux_process_qsupported (const char *query)
5339 {
5340 if (the_low_target.process_qsupported != NULL)
5341 the_low_target.process_qsupported (query);
5342 }
5343
5344 static int
5345 linux_supports_tracepoints (void)
5346 {
5347 if (*the_low_target.supports_tracepoints == NULL)
5348 return 0;
5349
5350 return (*the_low_target.supports_tracepoints) ();
5351 }
5352
5353 static CORE_ADDR
5354 linux_read_pc (struct regcache *regcache)
5355 {
5356 if (the_low_target.get_pc == NULL)
5357 return 0;
5358
5359 return (*the_low_target.get_pc) (regcache);
5360 }
5361
5362 static void
5363 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5364 {
5365 gdb_assert (the_low_target.set_pc != NULL);
5366
5367 (*the_low_target.set_pc) (regcache, pc);
5368 }
5369
5370 static int
5371 linux_thread_stopped (struct thread_info *thread)
5372 {
5373 return get_thread_lwp (thread)->stopped;
5374 }
5375
5376 /* This exposes stop-all-threads functionality to other modules. */
5377
5378 static void
5379 linux_pause_all (int freeze)
5380 {
5381 stop_all_lwps (freeze, NULL);
5382 }
5383
5384 /* This exposes unstop-all-threads functionality to other gdbserver
5385 modules. */
5386
5387 static void
5388 linux_unpause_all (int unfreeze)
5389 {
5390 unstop_all_lwps (unfreeze, NULL);
5391 }
5392
5393 static int
5394 linux_prepare_to_access_memory (void)
5395 {
5396 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5397 running LWP. */
5398 if (non_stop)
5399 linux_pause_all (1);
5400 return 0;
5401 }
5402
5403 static void
5404 linux_done_accessing_memory (void)
5405 {
5406 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5407 running LWP. */
5408 if (non_stop)
5409 linux_unpause_all (1);
5410 }
5411
5412 static int
5413 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5414 CORE_ADDR collector,
5415 CORE_ADDR lockaddr,
5416 ULONGEST orig_size,
5417 CORE_ADDR *jump_entry,
5418 CORE_ADDR *trampoline,
5419 ULONGEST *trampoline_size,
5420 unsigned char *jjump_pad_insn,
5421 ULONGEST *jjump_pad_insn_size,
5422 CORE_ADDR *adjusted_insn_addr,
5423 CORE_ADDR *adjusted_insn_addr_end,
5424 char *err)
5425 {
5426 return (*the_low_target.install_fast_tracepoint_jump_pad)
5427 (tpoint, tpaddr, collector, lockaddr, orig_size,
5428 jump_entry, trampoline, trampoline_size,
5429 jjump_pad_insn, jjump_pad_insn_size,
5430 adjusted_insn_addr, adjusted_insn_addr_end,
5431 err);
5432 }
5433
5434 static struct emit_ops *
5435 linux_emit_ops (void)
5436 {
5437 if (the_low_target.emit_ops != NULL)
5438 return (*the_low_target.emit_ops) ();
5439 else
5440 return NULL;
5441 }
5442
5443 static int
5444 linux_get_min_fast_tracepoint_insn_len (void)
5445 {
5446 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5447 }
5448
5449 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5450
5451 static int
5452 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5453 CORE_ADDR *phdr_memaddr, int *num_phdr)
5454 {
5455 char filename[PATH_MAX];
5456 int fd;
5457 const int auxv_size = is_elf64
5458 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5459 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5460
5461 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5462
5463 fd = open (filename, O_RDONLY);
5464 if (fd < 0)
5465 return 1;
5466
5467 *phdr_memaddr = 0;
5468 *num_phdr = 0;
5469 while (read (fd, buf, auxv_size) == auxv_size
5470 && (*phdr_memaddr == 0 || *num_phdr == 0))
5471 {
5472 if (is_elf64)
5473 {
5474 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5475
5476 switch (aux->a_type)
5477 {
5478 case AT_PHDR:
5479 *phdr_memaddr = aux->a_un.a_val;
5480 break;
5481 case AT_PHNUM:
5482 *num_phdr = aux->a_un.a_val;
5483 break;
5484 }
5485 }
5486 else
5487 {
5488 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5489
5490 switch (aux->a_type)
5491 {
5492 case AT_PHDR:
5493 *phdr_memaddr = aux->a_un.a_val;
5494 break;
5495 case AT_PHNUM:
5496 *num_phdr = aux->a_un.a_val;
5497 break;
5498 }
5499 }
5500 }
5501
5502 close (fd);
5503
5504 if (*phdr_memaddr == 0 || *num_phdr == 0)
5505 {
5506 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5507 "phdr_memaddr = %ld, phdr_num = %d",
5508 (long) *phdr_memaddr, *num_phdr);
5509 return 2;
5510 }
5511
5512 return 0;
5513 }
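
/* For illustration: each auxv record is a (type, value) pair, so for
   a 64-bit inferior the file holds entries like (addresses are
   hypothetical)

     { a_type = AT_PHDR,  a_un.a_val = 0x400040 }
     { a_type = AT_PHNUM, a_un.a_val = 9 }

   terminated by an AT_NULL record.  */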
5514
5515 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5516
5517 static CORE_ADDR
5518 get_dynamic (const int pid, const int is_elf64)
5519 {
5520 CORE_ADDR phdr_memaddr, relocation;
5521 int num_phdr, i;
5522 unsigned char *phdr_buf;
5523 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5524
5525 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5526 return 0;
5527
5528 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5529 phdr_buf = alloca (num_phdr * phdr_size);
5530
5531 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5532 return 0;
5533
5534 /* Compute relocation: it is expected to be 0 for "regular" executables,
5535 non-zero for PIE ones. */
5536 relocation = -1;
5537 for (i = 0; relocation == -1 && i < num_phdr; i++)
5538 if (is_elf64)
5539 {
5540 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5541
5542 if (p->p_type == PT_PHDR)
5543 relocation = phdr_memaddr - p->p_vaddr;
5544 }
5545 else
5546 {
5547 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5548
5549 if (p->p_type == PT_PHDR)
5550 relocation = phdr_memaddr - p->p_vaddr;
5551 }
5552
5553 if (relocation == -1)
5554 {
5555 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
5556 all real-world executables, including PIE executables, always have
5557 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
5558 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5559 provides DT_DEBUG anyway (fpc binaries are statically linked).
5560
5561 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5562
5563 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5564
5565 return 0;
5566 }
5567
5568 for (i = 0; i < num_phdr; i++)
5569 {
5570 if (is_elf64)
5571 {
5572 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5573
5574 if (p->p_type == PT_DYNAMIC)
5575 return p->p_vaddr + relocation;
5576 }
5577 else
5578 {
5579 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5580
5581 if (p->p_type == PT_DYNAMIC)
5582 return p->p_vaddr + relocation;
5583 }
5584 }
5585
5586 return 0;
5587 }
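
/* For example (hypothetical addresses): a PIE whose PT_PHDR has
   p_vaddr == 0x40 but whose program headers were found at
   0x555555554040 yields RELOCATION == 0x555555554000, which is then
   added to PT_DYNAMIC's p_vaddr to obtain the runtime &_DYNAMIC.  */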
5588
5589 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5590 can be 0 if the inferior does not yet have the library list initialized.
5591 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5592 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5593
5594 static CORE_ADDR
5595 get_r_debug (const int pid, const int is_elf64)
5596 {
5597 CORE_ADDR dynamic_memaddr;
5598 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5599 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5600 CORE_ADDR map = -1;
5601
5602 dynamic_memaddr = get_dynamic (pid, is_elf64);
5603 if (dynamic_memaddr == 0)
5604 return map;
5605
5606 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5607 {
5608 if (is_elf64)
5609 {
5610 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5611 #ifdef DT_MIPS_RLD_MAP
5612 union
5613 {
5614 Elf64_Xword map;
5615 unsigned char buf[sizeof (Elf64_Xword)];
5616 }
5617 rld_map;
5618
5619 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5620 {
5621 if (linux_read_memory (dyn->d_un.d_val,
5622 rld_map.buf, sizeof (rld_map.buf)) == 0)
5623 return rld_map.map;
5624 else
5625 break;
5626 }
5627 #endif /* DT_MIPS_RLD_MAP */
5628
5629 if (dyn->d_tag == DT_DEBUG && map == -1)
5630 map = dyn->d_un.d_val;
5631
5632 if (dyn->d_tag == DT_NULL)
5633 break;
5634 }
5635 else
5636 {
5637 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5638 #ifdef DT_MIPS_RLD_MAP
5639 union
5640 {
5641 Elf32_Word map;
5642 unsigned char buf[sizeof (Elf32_Word)];
5643 }
5644 rld_map;
5645
5646 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5647 {
5648 if (linux_read_memory (dyn->d_un.d_val,
5649 rld_map.buf, sizeof (rld_map.buf)) == 0)
5650 return rld_map.map;
5651 else
5652 break;
5653 }
5654 #endif /* DT_MIPS_RLD_MAP */
5655
5656 if (dyn->d_tag == DT_DEBUG && map == -1)
5657 map = dyn->d_un.d_val;
5658
5659 if (dyn->d_tag == DT_NULL)
5660 break;
5661 }
5662
5663 dynamic_memaddr += dyn_size;
5664 }
5665
5666 return map;
5667 }
5668
5669 /* Read one pointer from MEMADDR in the inferior. */
5670
5671 static int
5672 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5673 {
5674 int ret;
5675
5676 /* Go through a union so this works on either big or little endian
5677 hosts, when the inferior's pointer size is smaller than the size
5678 of CORE_ADDR. It is assumed the inferior's endianness is the
5679 same as the superior's. */
5680 union
5681 {
5682 CORE_ADDR core_addr;
5683 unsigned int ui;
5684 unsigned char uc;
5685 } addr;
5686
5687 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5688 if (ret == 0)
5689 {
5690 if (ptr_size == sizeof (CORE_ADDR))
5691 *ptr = addr.core_addr;
5692 else if (ptr_size == sizeof (unsigned int))
5693 *ptr = addr.ui;
5694 else
5695 gdb_assert_not_reached ("unhandled pointer size");
5696 }
5697 return ret;
5698 }
5699
5700 struct link_map_offsets
5701 {
5702 /* Offset and size of r_debug.r_version. */
5703 int r_version_offset;
5704
5705 /* Offset and size of r_debug.r_map. */
5706 int r_map_offset;
5707
5708 /* Offset to l_addr field in struct link_map. */
5709 int l_addr_offset;
5710
5711 /* Offset to l_name field in struct link_map. */
5712 int l_name_offset;
5713
5714 /* Offset to l_ld field in struct link_map. */
5715 int l_ld_offset;
5716
5717 /* Offset to l_next field in struct link_map. */
5718 int l_next_offset;
5719
5720 /* Offset to l_prev field in struct link_map. */
5721 int l_prev_offset;
5722 };
5723
5724 /* Construct qXfer:libraries-svr4:read reply. */
5725
5726 static int
5727 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5728 unsigned const char *writebuf,
5729 CORE_ADDR offset, int len)
5730 {
5731 char *document;
5732 unsigned document_len;
5733 struct process_info_private *const priv = current_process ()->private;
5734 char filename[PATH_MAX];
5735 int pid, is_elf64;
5736
5737 static const struct link_map_offsets lmo_32bit_offsets =
5738 {
5739 0, /* r_version offset. */
5740 4, /* r_debug.r_map offset. */
5741 0, /* l_addr offset in link_map. */
5742 4, /* l_name offset in link_map. */
5743 8, /* l_ld offset in link_map. */
5744 12, /* l_next offset in link_map. */
5745 16 /* l_prev offset in link_map. */
5746 };
5747
5748 static const struct link_map_offsets lmo_64bit_offsets =
5749 {
5750 0, /* r_version offset. */
5751 8, /* r_debug.r_map offset. */
5752 0, /* l_addr offset in link_map. */
5753 8, /* l_name offset in link_map. */
5754 16, /* l_ld offset in link_map. */
5755 24, /* l_next offset in link_map. */
5756 32 /* l_prev offset in link_map. */
5757 };
5758 const struct link_map_offsets *lmo;
5759 unsigned int machine;
5760 int ptr_size;
5761 CORE_ADDR lm_addr = 0, lm_prev = 0;
5762 int allocated = 1024;
5763 char *p;
5764 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5765 int header_done = 0;
5766
5767 if (writebuf != NULL)
5768 return -2;
5769 if (readbuf == NULL)
5770 return -1;
5771
5772 pid = lwpid_of (current_inferior);
5773 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5774 is_elf64 = elf_64_file_p (filename, &machine);
5775 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5776 ptr_size = is_elf64 ? 8 : 4;
5777
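/* The annex is a sequence of "name=value;" pairs with hex-encoded
   values, e.g. "start=7ffff7ffd9f0;prev=0;" (addresses illustrative):
   "start" seeds the link-map walk below, and "prev" is the expected
   l_prev of that entry, checked for consistency.  Pairs with
   unrecognized names are skipped. */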
5778 while (annex[0] != '\0')
5779 {
5780 const char *sep;
5781 CORE_ADDR *addrp;
5782 int len;
5783
5784 sep = strchr (annex, '=');
5785 if (sep == NULL)
5786 break;
5787
5788 len = sep - annex;
5789 if (len == 5 && strncmp (annex, "start", 5) == 0)
5790 addrp = &lm_addr;
5791 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5792 addrp = &lm_prev;
5793 else
5794 {
5795 annex = strchr (sep, ';');
5796 if (annex == NULL)
5797 break;
5798 annex++;
5799 continue;
5800 }
5801
5802 annex = decode_address_to_semicolon (addrp, sep + 1);
5803 }
5804
5805 if (lm_addr == 0)
5806 {
5807 int r_version = 0;
5808
5809 if (priv->r_debug == 0)
5810 priv->r_debug = get_r_debug (pid, is_elf64);
5811
5812 /* We failed to find DT_DEBUG. This situation will not change
5813 for this inferior, so do not retry it. Report it to GDB as
5814 E01; see GDB's solib-svr4.c for the reasoning. */
5815 if (priv->r_debug == (CORE_ADDR) -1)
5816 return -1;
5817
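/* Make sure the r_debug layout is the one LMO describes (r_version 1)
   before chasing r_map, the head of the link-map list. */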
5818 if (priv->r_debug != 0)
5819 {
5820 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5821 (unsigned char *) &r_version,
5822 sizeof (r_version)) != 0
5823 || r_version != 1)
5824 {
5825 warning ("unexpected r_debug version %d", r_version);
5826 }
5827 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5828 &lm_addr, ptr_size) != 0)
5829 {
5830 warning ("unable to read r_map from 0x%lx",
5831 (long) (priv->r_debug + lmo->r_map_offset));
5832 }
5833 }
5834 }
5835
5836 document = xmalloc (allocated);
5837 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5838 p = document + strlen (document);
5839
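/* The reply document built below looks like (addresses illustrative):
   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7fc3700"
   l_addr="0x7ffff7838000" l_ld="0x7ffff7bd4b80"/>
   </library-list-svr4> */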
5840 while (lm_addr
5841 && read_one_ptr (lm_addr + lmo->l_name_offset,
5842 &l_name, ptr_size) == 0
5843 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5844 &l_addr, ptr_size) == 0
5845 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5846 &l_ld, ptr_size) == 0
5847 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5848 &l_prev, ptr_size) == 0
5849 && read_one_ptr (lm_addr + lmo->l_next_offset,
5850 &l_next, ptr_size) == 0)
5851 {
5852 unsigned char libname[PATH_MAX];
5853
5854 if (lm_prev != l_prev)
5855 {
5856 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5857 (long) lm_prev, (long) l_prev);
5858 break;
5859 }
5860
5861 /* Ignore the first entry even if it has a valid name, as the first
5862 entry corresponds to the main executable. The first entry should not
5863 be skipped if the dynamic loader was loaded late by a static executable
5864 (see the solib-svr4.c parameter ignore_first), but in that case the
5865 main executable has no PT_DYNAMIC and this function has already
5866 returned above because get_r_debug failed. */
5867 if (lm_prev == 0)
5868 {
5869 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5870 p = p + strlen (p);
5871 }
5872 else
5873 {
5874 /* Not checking for error because reading may stop before
5875 we've got PATH_MAX worth of characters. */
5876 libname[0] = '\0';
5877 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5878 libname[sizeof (libname) - 1] = '\0';
5879 if (libname[0] != '\0')
5880 {
5881 /* 6x the size for xml_escape_text below. */
5882 size_t len = 6 * strlen ((char *) libname);
5883 char *name;
5884
5885 if (!header_done)
5886 {
5887 /* Terminate `<library-list-svr4'. */
5888 *p++ = '>';
5889 header_done = 1;
5890 }
5891
5892 while (allocated < p - document + len + 200)
5893 {
5894 /* Expand to guarantee sufficient storage. */
5895 uintptr_t document_len = p - document;
5896
5897 document = xrealloc (document, 2 * allocated);
5898 allocated *= 2;
5899 p = document + document_len;
5900 }
5901
5902 name = xml_escape_text ((char *) libname);
5903 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5904 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5905 name, (unsigned long) lm_addr,
5906 (unsigned long) l_addr, (unsigned long) l_ld);
5907 free (name);
5908 }
5909 }
5910
5911 lm_prev = lm_addr;
5912 lm_addr = l_next;
5913 }
5914
5915 if (!header_done)
5916 {
5917 /* Empty list; terminate `<library-list-svr4'. */
5918 strcpy (p, "/>");
5919 }
5920 else
5921 strcpy (p, "</library-list-svr4>");
5922
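/* Serve only the requested [OFFSET, OFFSET + LEN) window of the
   document; a return shorter than LEN (possibly zero) tells GDB it
   has reached the end of the object. */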
5923 document_len = strlen (document);
5924 if (offset < document_len)
5925 document_len -= offset;
5926 else
5927 document_len = 0;
5928 if (len > document_len)
5929 len = document_len;
5930
5931 memcpy (readbuf, document + offset, len);
5932 xfree (document);
5933
5934 return len;
5935 }
5936
5937 #ifdef HAVE_LINUX_BTRACE
5938
5939 /* See to_enable_btrace target method. */
5940
5941 static struct btrace_target_info *
5942 linux_low_enable_btrace (ptid_t ptid)
5943 {
5944 struct btrace_target_info *tinfo;
5945
5946 tinfo = linux_enable_btrace (ptid);
5947
5948 if (tinfo != NULL)
5949 {
5950 struct thread_info *thread = find_thread_ptid (ptid);
5951 struct regcache *regcache = get_thread_regcache (thread, 0);
5952
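/* Record the inferior's pointer width, taken from the size of
   register 0, so that branch trace addresses can be interpreted
   correctly. */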
5953 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5954 }
5955
5956 return tinfo;
5957 }
5958
5959 /* See to_disable_btrace target method. */
5960
5961 static int
5962 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5963 {
5964 enum btrace_error err;
5965
5966 err = linux_disable_btrace (tinfo);
5967 return (err == BTRACE_ERR_NONE ? 0 : -1);
5968 }
5969
5970 /* See to_read_btrace target method. */
5971
5972 static int
5973 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5974 int type)
5975 {
5976 VEC (btrace_block_s) *btrace;
5977 struct btrace_block *block;
5978 enum btrace_error err;
5979 int i;
5980
5981 btrace = NULL;
5982 err = linux_read_btrace (&btrace, tinfo, type);
5983 if (err != BTRACE_ERR_NONE)
5984 {
5985 if (err == BTRACE_ERR_OVERFLOW)
5986 buffer_grow_str0 (buffer, "E.Overflow.");
5987 else
5988 buffer_grow_str0 (buffer, "E.Generic Error.");
5989
5990 return -1;
5991 }
5992
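/* Encode the trace as an XML document of the form:
   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400540" end="0x400562"/>
   </btrace>
   with one <block/> per run of sequentially executed code
   (addresses illustrative). */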
5993 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5994 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5995
5996 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5997 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5998 paddress (block->begin), paddress (block->end));
5999
6000 buffer_grow_str0 (buffer, "</btrace>\n");
6001
6002 VEC_free (btrace_block_s, btrace);
6003
6004 return 0;
6005 }
6006 #endif /* HAVE_LINUX_BTRACE */
6007
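/* The target vector is initialized positionally, so the entry order
   below must match the declaration order of struct target_ops;
   hooks this build cannot support are NULL. */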
6008 static struct target_ops linux_target_ops = {
6009 linux_create_inferior,
6010 linux_attach,
6011 linux_kill,
6012 linux_detach,
6013 linux_mourn,
6014 linux_join,
6015 linux_thread_alive,
6016 linux_resume,
6017 linux_wait,
6018 linux_fetch_registers,
6019 linux_store_registers,
6020 linux_prepare_to_access_memory,
6021 linux_done_accessing_memory,
6022 linux_read_memory,
6023 linux_write_memory,
6024 linux_look_up_symbols,
6025 linux_request_interrupt,
6026 linux_read_auxv,
6027 linux_supports_z_point_type,
6028 linux_insert_point,
6029 linux_remove_point,
6030 linux_stopped_by_watchpoint,
6031 linux_stopped_data_address,
6032 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6033 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6034 && defined(PT_TEXT_END_ADDR)
6035 linux_read_offsets,
6036 #else
6037 NULL,
6038 #endif
6039 #ifdef USE_THREAD_DB
6040 thread_db_get_tls_address,
6041 #else
6042 NULL,
6043 #endif
6044 linux_qxfer_spu,
6045 hostio_last_error_from_errno,
6046 linux_qxfer_osdata,
6047 linux_xfer_siginfo,
6048 linux_supports_non_stop,
6049 linux_async,
6050 linux_start_non_stop,
6051 linux_supports_multi_process,
6052 #ifdef USE_THREAD_DB
6053 thread_db_handle_monitor_command,
6054 #else
6055 NULL,
6056 #endif
6057 linux_common_core_of_thread,
6058 linux_read_loadmap,
6059 linux_process_qsupported,
6060 linux_supports_tracepoints,
6061 linux_read_pc,
6062 linux_write_pc,
6063 linux_thread_stopped,
6064 NULL,
6065 linux_pause_all,
6066 linux_unpause_all,
6067 linux_cancel_breakpoints,
6068 linux_stabilize_threads,
6069 linux_install_fast_tracepoint_jump_pad,
6070 linux_emit_ops,
6071 linux_supports_disable_randomization,
6072 linux_get_min_fast_tracepoint_insn_len,
6073 linux_qxfer_libraries_svr4,
6074 linux_supports_agent,
6075 #ifdef HAVE_LINUX_BTRACE
6076 linux_supports_btrace,
6077 linux_low_enable_btrace,
6078 linux_low_disable_btrace,
6079 linux_low_read_btrace,
6080 #else
6081 NULL,
6082 NULL,
6083 NULL,
6084 NULL,
6085 #endif
6086 linux_supports_range_stepping,
6087 };
6088
6089 static void
6090 linux_init_signals (void)
6091 {
6092 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6093 to find what the cancel signal actually is. */
6094 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6095 signal (__SIGRTMIN+1, SIG_IGN);
6096 #endif
6097 }
6098
6099 #ifdef HAVE_LINUX_REGSETS
6100 void
6101 initialize_regsets_info (struct regsets_info *info)
6102 {
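/* Count the regsets provided by the target; the array is terminated
   by a sentinel entry whose size is negative. */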
6103 for (info->num_regsets = 0;
6104 info->regsets[info->num_regsets].size >= 0;
6105 info->num_regsets++)
6106 ;
6107 }
6108 #endif
6109
6110 void
6111 initialize_low (void)
6112 {
6113 struct sigaction sigchld_action;
6114 memset (&sigchld_action, 0, sizeof (sigchld_action));
6115 set_target_ops (&linux_target_ops);
6116 set_breakpoint_data (the_low_target.breakpoint,
6117 the_low_target.breakpoint_len);
6118 linux_init_signals ();
6119 linux_ptrace_init_warnings ();
6120
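/* Use SA_RESTART so that gdbserver's blocking system calls are
   transparently restarted when SIGCHLD reports a child status
   change. */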
6121 sigchld_action.sa_handler = sigchld_handler;
6122 sigemptyset (&sigchld_action.sa_mask);
6123 sigchld_action.sa_flags = SA_RESTART;
6124 sigaction (SIGCHLD, &sigchld_action, NULL);
6125
6126 initialize_low_arch ();
6127 }