/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
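
/* A worked example of the layout above (a sketch of the usual Linux
   wait-status encoding): W_STOPCODE (SIGSTOP) yields 0x137f on
   targets where SIGSTOP is 19, and the standard macros invert it:
   WIFSTOPPED (0x137f) is true and WSTOPSIG (0x137f) == SIGSTOP.  */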

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* The BFIN kernel headers have defined these since at least 2.6.32;
   the values below are a fallback for older headers.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;   /* Entry type */
  union
    {
      uint32_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;   /* Entry type */
  union
    {
      uint64_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
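
/* Each auxv entry is a (type, value) pair; for instance, an entry
   whose a_type is AT_ENTRY carries the program's entry point in
   a_un.a_val.  */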

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
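
/* A sketch of how these pair up: when the wait loop sees a stop for
   a pid it does not know about yet, it can queue it with
   add_to_pid_list (&stopped_pids, pid, status); handle_extended_wait
   below claims it with pull_pid_from_list (&stopped_pids, new_pid,
   &status), which returns 1 and fills STATUS only if PID was
   queued.  */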

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};
/* The read/write ends of the pipe registered as a waitable file in
   the event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes
   a 32-bit ELF file, and -1 if it is not an ELF file at all.  Set
   *MACHINE to the ELF machine, or to EM_NONE if the magic does not
   match.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if the file is not accessible, doesn't exist, or is
   not an ELF file.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 1 so as
   not to report the trap to higher layers).  Fork, vfork, and
   vfork-done events are reported to the caller (return 0).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;
          clone_all_breakpoints (&child_proc->breakpoints,
                                 &child_proc->raw_breakpoints,
                                 parent_proc->breakpoints);

          tdesc = xmalloc (sizeof (struct target_desc));
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;
          child_lwp->must_set_ptrace_flags = 1;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
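
/* For instance, on x86 the breakpoint instruction (int3) is one byte
   long and decr_pc_after_break is 1: after the trap for a breakpoint
   planted at 0x1000, the kernel reports a PC of 0x1001, and the
   SW_BREAKPOINT_PC computed below is 0x1000 again.  */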

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by software breakpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              /* Back up the PC if necessary.  */
              if (pc != sw_breakpoint_pc)
                {
                  struct regcache *regcache
                    = get_thread_regcache (current_thread, 1);
                  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
                }

              lwp->stop_pc = sw_breakpoint_pc;
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_HWBKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by hardware "
                                "breakpoint/watchpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              lwp->stop_pc = pc;
              lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by trace\n",
                                target_pid_to_str (ptid_of (thr)));
                }
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
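
/* The child side above follows the classic ptrace startup handshake:
   the forked child calls ptrace (PTRACE_TRACEME, ...) and then execs,
   so the kernel stops it with a SIGTRAP before the new program's
   first instruction runs; the parent collects that stop with its
   first wait on PID.  */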

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things apparently
        work because enough time passes between our ptrace (PTRACE_ATTACH)
        and gdb's next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}
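
/* A note on the logic above: find_inferior stops at the first entry
   for which the callback returns nonzero, so it returns non-NULL
   exactly when a second thread of PID exists; a NULL result
   therefore means PID's thread is the last one.  */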

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (thread)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
        count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
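
/* A usage sketch (the callback here is hypothetical): to find any
   LWP of process PID that is still running, one could write

     static int
     lwp_running_p (struct lwp_info *lwp, void *data)
     {
       return !lwp->stopped;
     }

     ... iterate_over_lwps (pid_to_ptid (pid), lwp_running_p, NULL);

   iterate_over_lwps returns the first LWP for which the callback
   returns nonzero, or NULL if there is none.  */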

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving way to the "new" leader.  We could try
               distinguishing the exit and exec cases by waiting once
               more and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            fprintf (stderr,
                     "CZL: Thread group leader %d zombie "
                     "(it exited, or another thread execd).\n",
                     leader_pid);

          delete_lwp (leader_lp);
        }
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        debug_printf ("Checking whether LWP %ld needs to move out of the "
                      "jump pad.\n",
                      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                debug_printf ("Checking whether LWP %ld needs to move out of "
                              "the jump pad...it does\n",
                              lwpid_of (current_thread));
              current_thread = saved_thread;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
                             (PTRACE_TYPE_ARG3) 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
                          (PTRACE_TYPE_ARG3) 0, &info);
                }

              regcache = get_thread_regcache (current_thread, 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
                              "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
                  "jump pad...no\n",
                  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
                  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        debug_printf ("   Already queued %d\n",
                      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                debug_printf ("Not requeuing already queued non-RT signal %d"
                              " for LWP %ld\n",
                              sig->signal,
                              lwpid_of (thread));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
          &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
1924
1925 /* Dequeue one signal from the "signals to report later when out of
1926 the jump pad" list. */
1927
1928 static int
1929 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1930 {
1931 struct thread_info *thread = get_lwp_thread (lwp);
1932
1933 if (lwp->pending_signals_to_report != NULL)
1934 {
1935 struct pending_signals **p_sig;
1936
1937 p_sig = &lwp->pending_signals_to_report;
1938 while ((*p_sig)->prev != NULL)
1939 p_sig = &(*p_sig)->prev;
1940
1941 *wstat = W_STOPCODE ((*p_sig)->signal);
1942 if ((*p_sig)->info.si_signo != 0)
1943 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1944 &(*p_sig)->info);
1945 free (*p_sig);
1946 *p_sig = NULL;
1947
1948 if (debug_threads)
1949 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1950 WSTOPSIG (*wstat), lwpid_of (thread));
1951
1952 if (debug_threads)
1953 {
1954 struct pending_signals *sig;
1955
1956 for (sig = lwp->pending_signals_to_report;
1957 sig != NULL;
1958 sig = sig->prev)
1959 debug_printf (" Still queued %d\n",
1960 sig->signal);
1961
1962 debug_printf (" (no more queued signals)\n");
1963 }
1964
1965 return 1;
1966 }
1967
1968 return 0;
1969 }
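
/* Editorial sketch (not part of gdbserver): the pending-signals list
   used above links entries newest-first through PREV, so enqueuing
   pushes at the head and dequeuing walks to the tail, giving FIFO
   delivery.  A minimal standalone version of that tail-pop, assuming
   the same struct pending_signals layout used above: */

static int
example_pop_oldest_signal (struct pending_signals **list)
{
  struct pending_signals **p = list;
  int signal;

  if (*p == NULL)
    return 0;

  /* Walk to the oldest (tail) entry.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  signal = (*p)->signal;
  free (*p);
  *p = NULL;
  return signal;
}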
1970
1971 /* Fetch the possibly triggered data watchpoint info and store it in
1972 CHILD.
1973
1974 On some archs, like x86, that use debug registers to set
1975 watchpoints, the only way to know which watched address trapped
1976 may be to read back the register that selects which address to
1977 watch. The problem is that, between setting the watchpoint and
1978 reading back which data address trapped, the user may change the
1979 set of watchpoints, and, as a consequence, GDB changes the
1980 debug registers in the inferior. To avoid reading back a stale
1981 stopped-data-address when that happens, we cache in CHILD the fact
1982 that a watchpoint trapped, and the corresponding data address, as
1983 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1984 registers meanwhile, we have the cached data we can rely on. */
1985
1986 static int
1987 check_stopped_by_watchpoint (struct lwp_info *child)
1988 {
1989 if (the_low_target.stopped_by_watchpoint != NULL)
1990 {
1991 struct thread_info *saved_thread;
1992
1993 saved_thread = current_thread;
1994 current_thread = get_lwp_thread (child);
1995
1996 if (the_low_target.stopped_by_watchpoint ())
1997 {
1998 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
1999
2000 if (the_low_target.stopped_data_address != NULL)
2001 child->stopped_data_address
2002 = the_low_target.stopped_data_address ();
2003 else
2004 child->stopped_data_address = 0;
2005 }
2006
2007 current_thread = saved_thread;
2008 }
2009
2010 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2011 }
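
/* Editorial sketch (not part of gdbserver): consumers of the cache
   filled in above read the saved stop reason and data address instead
   of re-reading debug registers, which GDB may have rewritten in the
   meantime.  A hypothetical accessor following that scheme: */

static int
example_cached_stopped_data_address (struct lwp_info *child,
                                     CORE_ADDR *addr_p)
{
  if (child->stop_reason != TARGET_STOPPED_BY_WATCHPOINT)
    return 0;

  /* Report the address cached at SIGTRAP time, not the current
     contents of the debug registers.  */
  *addr_p = child->stopped_data_address;
  return 1;
}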
2012
2013 /* Return the ptrace options that we want to try to enable. */
2014
2015 static int
2016 linux_low_ptrace_options (int attached)
2017 {
2018 int options = 0;
2019
2020 if (!attached)
2021 options |= PTRACE_O_EXITKILL;
2022
2023 if (report_fork_events)
2024 options |= PTRACE_O_TRACEFORK;
2025
2026 if (report_vfork_events)
2027 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2028
2029 return options;
2030 }
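
/* Editorial sketch (not part of gdbserver): the options computed
   above reach the kernel through linux_enable_event_reporting (see
   nat/linux-ptrace.c), called once the tracee is ptrace-stopped.
   The underlying operation is roughly: */

static void
example_apply_ptrace_options (int pid, int options)
{
  /* Fails if PID is not a ptrace-stopped tracee of ours, or if the
     kernel does not know one of the requested options.  */
  if (ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (uintptr_t) options) != 0)
    perror ("PTRACE_SETOPTIONS");
}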
2031
2032 /* Do low-level handling of the event, and check if we should go on
2033 and pass it to caller code. Return the affected lwp if we
2034 should pass the event on, or NULL otherwise. */
2035
2036 static struct lwp_info *
2037 linux_low_filter_event (int lwpid, int wstat)
2038 {
2039 struct lwp_info *child;
2040 struct thread_info *thread;
2041 int have_stop_pc = 0;
2042
2043 child = find_lwp_pid (pid_to_ptid (lwpid));
2044
2045 /* If we didn't find a process, one of two things presumably happened:
2046 - A process we started and then detached from has exited. Ignore it.
2047 - A process we are controlling has forked and the new child's stop
2048 was reported to us by the kernel. Save its PID. */
2049 if (child == NULL && WIFSTOPPED (wstat))
2050 {
2051 add_to_pid_list (&stopped_pids, lwpid, wstat);
2052 return NULL;
2053 }
2054 else if (child == NULL)
2055 return NULL;
2056
2057 thread = get_lwp_thread (child);
2058
2059 child->stopped = 1;
2060
2061 child->last_status = wstat;
2062
2063 /* Check if the thread has exited. */
2064 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2065 {
2066 if (debug_threads)
2067 debug_printf ("LLFE: %d exited.\n", lwpid);
2068 if (num_lwps (pid_of (thread)) > 1)
2069 {
2070
2071 /* If there is at least one more LWP, then the exit signal was
2072 not the end of the debugged application and should be
2073 ignored. */
2074 delete_lwp (child);
2075 return NULL;
2076 }
2077 else
2078 {
2079 /* This was the last lwp in the process. Since events are
2080 serialized to the GDB core, we can't report this one
2081 right now; but the GDB core and the other target layers
2082 will want to be notified about the exit code/signal, so
2083 leave the status pending for the next time we're able
2084 to report it. */
2085 mark_lwp_dead (child, wstat);
2086 return child;
2087 }
2088 }
2089
2090 gdb_assert (WIFSTOPPED (wstat));
2091
2092 if (WIFSTOPPED (wstat))
2093 {
2094 struct process_info *proc;
2095
2096 /* Architecture-specific setup after inferior is running. This
2097 needs to happen after we have attached to the inferior and it
2098 is stopped for the first time, but before we access any
2099 inferior registers. */
2100 proc = find_process_pid (pid_of (thread));
2101 if (proc->priv->new_inferior)
2102 {
2103 struct thread_info *saved_thread;
2104
2105 saved_thread = current_thread;
2106 current_thread = thread;
2107
2108 the_low_target.arch_setup ();
2109
2110 current_thread = saved_thread;
2111
2112 proc->priv->new_inferior = 0;
2113 }
2114 }
2115
2116 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2117 {
2118 struct process_info *proc = find_process_pid (pid_of (thread));
2119 int options = linux_low_ptrace_options (proc->attached);
2120
2121 linux_enable_event_reporting (lwpid, options);
2122 child->must_set_ptrace_flags = 0;
2123 }
2124
2125 /* Be careful to not overwrite stop_pc until
2126 check_stopped_by_breakpoint is called. */
2127 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2128 && linux_is_extended_waitstatus (wstat))
2129 {
2130 child->stop_pc = get_pc (child);
2131 if (handle_extended_wait (child, wstat))
2132 {
2133 /* The event has been handled, so just return without
2134 reporting it. */
2135 return NULL;
2136 }
2137 }
2138
2139 /* Check first whether this was a SW/HW breakpoint before checking
2140 watchpoints, because at least s390 can't tell the data address of
2141 hardware watchpoint hits, and returns stopped-by-watchpoint as
2142 long as there's a watchpoint set. */
2143 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2144 {
2145 if (check_stopped_by_breakpoint (child))
2146 have_stop_pc = 1;
2147 }
2148
2149 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2150 or hardware watchpoint. Check which is which if we got
2151 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2152 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2153 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2154 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2155 check_stopped_by_watchpoint (child);
2156
2157 if (!have_stop_pc)
2158 child->stop_pc = get_pc (child);
2159
2160 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2161 && child->stop_expected)
2162 {
2163 if (debug_threads)
2164 debug_printf ("Expected stop.\n");
2165 child->stop_expected = 0;
2166
2167 if (thread->last_resume_kind == resume_stop)
2168 {
2169 /* We want to report the stop to the core. Treat the
2170 SIGSTOP as a normal event. */
2171 if (debug_threads)
2172 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2173 target_pid_to_str (ptid_of (thread)));
2174 }
2175 else if (stopping_threads != NOT_STOPPING_THREADS)
2176 {
2177 /* Stopping threads. We don't want this SIGSTOP to end up
2178 pending. */
2179 if (debug_threads)
2180 debug_printf ("LLW: SIGSTOP caught for %s "
2181 "while stopping threads.\n",
2182 target_pid_to_str (ptid_of (thread)));
2183 return NULL;
2184 }
2185 else
2186 {
2187 /* This is a delayed SIGSTOP. Filter out the event. */
2188 if (debug_threads)
2189 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2190 child->stepping ? "step" : "continue",
2191 target_pid_to_str (ptid_of (thread)));
2192
2193 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2194 return NULL;
2195 }
2196 }
2197
2198 child->status_pending_p = 1;
2199 child->status_pending = wstat;
2200 return child;
2201 }
2202
2203 /* Resume LWPs that are currently stopped without any pending status
2204 to report, but are resumed from the core's perspective. */
2205
2206 static void
2207 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2208 {
2209 struct thread_info *thread = (struct thread_info *) entry;
2210 struct lwp_info *lp = get_thread_lwp (thread);
2211
2212 if (lp->stopped
2213 && !lp->status_pending_p
2214 && thread->last_resume_kind != resume_stop
2215 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2216 {
2217 int step = thread->last_resume_kind == resume_step;
2218
2219 if (debug_threads)
2220 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2221 target_pid_to_str (ptid_of (thread)),
2222 paddress (lp->stop_pc),
2223 step);
2224
2225 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2226 }
2227 }
2228
2229 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2230 match FILTER_PTID (leaving others pending). The PTIDs can be:
2231 minus_one_ptid, to specify any child; a pid PTID, specifying all
2232 lwps of a thread group; or a PTID representing a single lwp. Store
2233 the stop status through the status pointer WSTAT. OPTIONS is
2234 passed to the waitpid call. Return 0 if no event was found and
2235 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2236 were found. Return the PID of the stopped child otherwise. */
2237
2238 static int
2239 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2240 int *wstatp, int options)
2241 {
2242 struct thread_info *event_thread;
2243 struct lwp_info *event_child, *requested_child;
2244 sigset_t block_mask, prev_mask;
2245
2246 retry:
2247 /* N.B. event_thread points to the thread_info struct that contains
2248 event_child. Keep them in sync. */
2249 event_thread = NULL;
2250 event_child = NULL;
2251 requested_child = NULL;
2252
2253 /* Check for a lwp with a pending status. */
2254
2255 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2256 {
2257 event_thread = (struct thread_info *)
2258 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2259 if (event_thread != NULL)
2260 event_child = get_thread_lwp (event_thread);
2261 if (debug_threads && event_thread)
2262 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2263 }
2264 else if (!ptid_equal (filter_ptid, null_ptid))
2265 {
2266 requested_child = find_lwp_pid (filter_ptid);
2267
2268 if (stopping_threads == NOT_STOPPING_THREADS
2269 && requested_child->status_pending_p
2270 && requested_child->collecting_fast_tracepoint)
2271 {
2272 enqueue_one_deferred_signal (requested_child,
2273 &requested_child->status_pending);
2274 requested_child->status_pending_p = 0;
2275 requested_child->status_pending = 0;
2276 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2277 }
2278
2279 if (requested_child->suspended
2280 && requested_child->status_pending_p)
2281 {
2282 internal_error (__FILE__, __LINE__,
2283 "requesting an event out of a"
2284 " suspended child?");
2285 }
2286
2287 if (requested_child->status_pending_p)
2288 {
2289 event_child = requested_child;
2290 event_thread = get_lwp_thread (event_child);
2291 }
2292 }
2293
2294 if (event_child != NULL)
2295 {
2296 if (debug_threads)
2297 debug_printf ("Got an event from pending child %ld (%04x)\n",
2298 lwpid_of (event_thread), event_child->status_pending);
2299 *wstatp = event_child->status_pending;
2300 event_child->status_pending_p = 0;
2301 event_child->status_pending = 0;
2302 current_thread = event_thread;
2303 return lwpid_of (event_thread);
2304 }
2305
2306 /* But if we don't find a pending event, we'll have to wait.
2307
2308 We only enter this loop if no process has a pending wait status.
2309 Thus any action taken in response to a wait status inside this
2310 loop is responding as soon as we detect the status, not after any
2311 pending events. */
2312
2313 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2314 all signals while here. */
2315 sigfillset (&block_mask);
2316 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2317
2318 /* Always pull all events out of the kernel. We'll randomly select
2319 an event LWP out of all that have events, to prevent
2320 starvation. */
2321 while (event_child == NULL)
2322 {
2323 pid_t ret = 0;
2324
2325 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2326 quirks:
2327
2328 - If the thread group leader exits while other threads in the
2329 thread group still exist, waitpid(TGID, ...) hangs. That
2330 waitpid won't return an exit status until the other threads
2331 in the group are reaped.
2332
2333 - When a non-leader thread execs, that thread just vanishes
2334 without reporting an exit (so we'd hang if we waited for it
2335 explicitly in that case). The exec event is reported to
2336 the TGID pid (although we don't currently enable exec
2337 events). */
2338 errno = 0;
2339 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2340
2341 if (debug_threads)
2342 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2343 ret, errno ? strerror (errno) : "ERRNO-OK");
2344
2345 if (ret > 0)
2346 {
2347 if (debug_threads)
2348 {
2349 debug_printf ("LLW: waitpid %ld received %s\n",
2350 (long) ret, status_to_str (*wstatp));
2351 }
2352
2353 /* Filter all events. IOW, leave all events pending. We'll
2354 randomly select an event LWP out of all that have events
2355 below. */
2356 linux_low_filter_event (ret, *wstatp);
2357 /* Retry until nothing comes out of waitpid. A single
2358 SIGCHLD can indicate more than one child stopped. */
2359 continue;
2360 }
2361
2362 /* Now that we've pulled all events out of the kernel, resume
2363 LWPs that don't have an interesting event to report. */
2364 if (stopping_threads == NOT_STOPPING_THREADS)
2365 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2366
2367 /* ... and find an LWP with a status to report to the core, if
2368 any. */
2369 event_thread = (struct thread_info *)
2370 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2371 if (event_thread != NULL)
2372 {
2373 event_child = get_thread_lwp (event_thread);
2374 *wstatp = event_child->status_pending;
2375 event_child->status_pending_p = 0;
2376 event_child->status_pending = 0;
2377 break;
2378 }
2379
2380 /* Check for zombie thread group leaders. Those can't be reaped
2381 until all other threads in the thread group are. */
2382 check_zombie_leaders ();
2383
2384 /* If there are no resumed children left in the set of LWPs we
2385 want to wait for, bail. We can't just block in
2386 waitpid/sigsuspend, because lwps might have been left stopped
2387 in trace-stop state, and we'd be stuck forever waiting for
2388 their status to change (which would only happen if we resumed
2389 them). Even if WNOHANG is set, this return code is preferred
2390 over 0 (below), as it is more detailed. */
2391 if ((find_inferior (&all_threads,
2392 not_stopped_callback,
2393 &wait_ptid) == NULL))
2394 {
2395 if (debug_threads)
2396 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2397 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2398 return -1;
2399 }
2400
2401 /* No interesting event to report to the caller. */
2402 if ((options & WNOHANG))
2403 {
2404 if (debug_threads)
2405 debug_printf ("WNOHANG set, no event found\n");
2406
2407 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2408 return 0;
2409 }
2410
2411 /* Block until we get an event reported with SIGCHLD. */
2412 if (debug_threads)
2413 debug_printf ("sigsuspend'ing\n");
2414
2415 sigsuspend (&prev_mask);
2416 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2417 goto retry;
2418 }
2419
2420 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2421
2422 current_thread = event_thread;
2423
2424 /* Check for thread exit. */
2425 if (! WIFSTOPPED (*wstatp))
2426 {
2427 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2428
2429 if (debug_threads)
2430 debug_printf ("LWP %d is the last lwp of process. "
2431 "Process %ld exiting.\n",
2432 pid_of (event_thread), lwpid_of (event_thread));
2433 return lwpid_of (event_thread);
2434 }
2435
2436 return lwpid_of (event_thread);
2437 }
2438
2439 /* Wait for an event from child(ren) PTID. PTIDs can be:
2440 minus_one_ptid, to specify any child; a pid PTID, specifying all
2441 lwps of a thread group; or a PTID representing a single lwp. Store
2442 the stop status through the status pointer WSTAT. OPTIONS is
2443 passed to the waitpid call. Return 0 if no event was found and
2444 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2445 were found. Return the PID of the stopped child otherwise. */
2446
2447 static int
2448 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2449 {
2450 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2451 }
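
/* Editorial sketch (not part of gdbserver): a minimal caller showing
   the return convention documented above, assuming polling with
   WNOHANG is wanted. */

static void
example_poll_for_event (ptid_t ptid)
{
  int wstat;
  int ret = linux_wait_for_event (ptid, &wstat, WNOHANG);

  if (ret == -1)
    {
      /* No unwaited-for children left in the set.  */
    }
  else if (ret == 0)
    {
      /* WNOHANG was set and no event was found yet.  */
    }
  else
    {
      /* RET is the LWP id of the stopped child; WSTAT holds its
         status, e.g. WIFSTOPPED/WSTOPSIG apply to it.  */
    }
}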
2452
2453 /* Count the LWPs that have had events. */
2454
2455 static int
2456 count_events_callback (struct inferior_list_entry *entry, void *data)
2457 {
2458 struct thread_info *thread = (struct thread_info *) entry;
2459 struct lwp_info *lp = get_thread_lwp (thread);
2460 int *count = data;
2461
2462 gdb_assert (count != NULL);
2463
2464 /* Count only resumed LWPs that have an event pending. */
2465 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2466 && lp->status_pending_p)
2467 (*count)++;
2468
2469 return 0;
2470 }
2471
2472 /* Select the LWP (if any) that is currently being single-stepped. */
2473
2474 static int
2475 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2476 {
2477 struct thread_info *thread = (struct thread_info *) entry;
2478 struct lwp_info *lp = get_thread_lwp (thread);
2479
2480 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2481 && thread->last_resume_kind == resume_step
2482 && lp->status_pending_p)
2483 return 1;
2484 else
2485 return 0;
2486 }
2487
2488 /* Select the Nth LWP that has had an event. */
2489
2490 static int
2491 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2492 {
2493 struct thread_info *thread = (struct thread_info *) entry;
2494 struct lwp_info *lp = get_thread_lwp (thread);
2495 int *selector = data;
2496
2497 gdb_assert (selector != NULL);
2498
2499 /* Select only resumed LWPs that have an event pending. */
2500 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2501 && lp->status_pending_p)
2502 if ((*selector)-- == 0)
2503 return 1;
2504
2505 return 0;
2506 }
2507
2508 /* Select one LWP out of those that have events pending. */
2509
2510 static void
2511 select_event_lwp (struct lwp_info **orig_lp)
2512 {
2513 int num_events = 0;
2514 int random_selector;
2515 struct thread_info *event_thread = NULL;
2516
2517 /* In all-stop, give preference to the LWP that is being
2518 single-stepped. There will be at most one, and it's the LWP that
2519 the core is most interested in. If we didn't do this, then we'd
2520 have to handle pending step SIGTRAPs somehow in case the core
2521 later continues the previously-stepped thread, otherwise we'd
2522 report the pending SIGTRAP, and the core, not having stepped the
2523 thread, wouldn't understand what the trap was for, and therefore
2524 would report it to the user as a random signal. */
2525 if (!non_stop)
2526 {
2527 event_thread
2528 = (struct thread_info *) find_inferior (&all_threads,
2529 select_singlestep_lwp_callback,
2530 NULL);
2531 if (event_thread != NULL)
2532 {
2533 if (debug_threads)
2534 debug_printf ("SEL: Select single-step %s\n",
2535 target_pid_to_str (ptid_of (event_thread)));
2536 }
2537 }
2538 if (event_thread == NULL)
2539 {
2540 /* No single-stepping LWP. Select one at random, out of those
2541 which have had events. */
2542
2543 /* First see how many events we have. */
2544 find_inferior (&all_threads, count_events_callback, &num_events);
2545 gdb_assert (num_events > 0);
2546
2547 /* Now randomly pick a LWP out of those that have had
2548 events. */
2549 random_selector = (int)
2550 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2551
2552 if (debug_threads && num_events > 1)
2553 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2554 num_events, random_selector);
2555
2556 event_thread
2557 = (struct thread_info *) find_inferior (&all_threads,
2558 select_event_lwp_callback,
2559 &random_selector);
2560 }
2561
2562 if (event_thread != NULL)
2563 {
2564 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2565
2566 /* Switch the event LWP. */
2567 *orig_lp = event_lp;
2568 }
2569 }
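
/* Editorial sketch (not part of gdbserver): the random selection in
   select_event_lwp above scales rand () into [0, NUM_EVENTS) so every
   pending-event LWP is picked with equal probability, which is what
   prevents starvation.  The same arithmetic in isolation: */

static int
example_uniform_pick (int num_events)
{
  /* (num_events * rand () / (RAND_MAX + 1.0)) lies in
     [0, num_events), so truncation yields 0 .. num_events - 1 with
     equal weight.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}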
2570
2571 /* Decrement the suspend count of an LWP. */
2572
2573 static int
2574 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2575 {
2576 struct thread_info *thread = (struct thread_info *) entry;
2577 struct lwp_info *lwp = get_thread_lwp (thread);
2578
2579 /* Ignore EXCEPT. */
2580 if (lwp == except)
2581 return 0;
2582
2583 lwp->suspended--;
2584
2585 gdb_assert (lwp->suspended >= 0);
2586 return 0;
2587 }
2588
2589 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2590 non-NULL. */
2591
2592 static void
2593 unsuspend_all_lwps (struct lwp_info *except)
2594 {
2595 find_inferior (&all_threads, unsuspend_one_lwp, except);
2596 }
2597
2598 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2599 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2600 void *data);
2601 static int lwp_running (struct inferior_list_entry *entry, void *data);
2602 static ptid_t linux_wait_1 (ptid_t ptid,
2603 struct target_waitstatus *ourstatus,
2604 int target_options);
2605
2606 /* Stabilize threads (move out of jump pads).
2607
2608 If a thread is midway through collecting a fast tracepoint, we
2609 need to finish the collection and move it out of the jump pad before
2610 reporting the signal.
2611
2612 This avoids recursion while collecting (when a signal arrives
2613 midway, and the signal handler itself collects), which would trash
2614 the trace buffer. In case the user set a breakpoint in a signal
2615 handler, this avoids the backtrace showing the jump pad, etc..
2616 Most importantly, there are certain things we can't do safely if
2617 threads are stopped in a jump pad (or in its callees). For
2618 example:
2619
2620 - starting a new trace run. A thread still collecting the
2621 previous run could trash the trace buffer when resumed. The trace
2622 buffer control structures would have been reset but the thread had
2623 no way to tell. The thread could even be midway through memcpy'ing
2624 to the buffer, which would mean that when resumed, it would clobber
2625 the trace buffer that had been set up for a new run.
2626
2627 - we can't rewrite/reuse the jump pads for new tracepoints
2628 safely. Say you do tstart while a thread is stopped midway
2629 through a collection. When the thread is later resumed, it finishes the
2630 collection, and returns to the jump pad, to execute the original
2631 instruction that was under the tracepoint jump at the time the
2632 older run had been started. If the jump pad had been rewritten
2633 since for something else in the new run, the thread would now
2634 execute the wrong / random instructions. */
2635
2636 static void
2637 linux_stabilize_threads (void)
2638 {
2639 struct thread_info *saved_thread;
2640 struct thread_info *thread_stuck;
2641
2642 thread_stuck
2643 = (struct thread_info *) find_inferior (&all_threads,
2644 stuck_in_jump_pad_callback,
2645 NULL);
2646 if (thread_stuck != NULL)
2647 {
2648 if (debug_threads)
2649 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2650 lwpid_of (thread_stuck));
2651 return;
2652 }
2653
2654 saved_thread = current_thread;
2655
2656 stabilizing_threads = 1;
2657
2658 /* Kick 'em all. */
2659 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2660
2661 /* Loop until all are stopped out of the jump pads. */
2662 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2663 {
2664 struct target_waitstatus ourstatus;
2665 struct lwp_info *lwp;
2666 int wstat;
2667
2668 /* Note that we go through the full wait event loop. While
2669 moving threads out of the jump pad, we need to be able to step
2670 over internal breakpoints and such. */
2671 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2672
2673 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2674 {
2675 lwp = get_thread_lwp (current_thread);
2676
2677 /* Lock it. */
2678 lwp->suspended++;
2679
2680 if (ourstatus.value.sig != GDB_SIGNAL_0
2681 || current_thread->last_resume_kind == resume_stop)
2682 {
2683 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2684 enqueue_one_deferred_signal (lwp, &wstat);
2685 }
2686 }
2687 }
2688
2689 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2690
2691 stabilizing_threads = 0;
2692
2693 current_thread = saved_thread;
2694
2695 if (debug_threads)
2696 {
2697 thread_stuck
2698 = (struct thread_info *) find_inferior (&all_threads,
2699 stuck_in_jump_pad_callback,
2700 NULL);
2701 if (thread_stuck != NULL)
2702 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2703 lwpid_of (thread_stuck));
2704 }
2705 }
2706
2707 static void async_file_mark (void);
2708
2709 /* Convenience function that is called when the kernel reports an
2710 event that is not passed out to GDB. */
2711
2712 static ptid_t
2713 ignore_event (struct target_waitstatus *ourstatus)
2714 {
2715 /* If we got an event, there may still be others, as a single
2716 SIGCHLD can indicate more than one child stopped. This forces
2717 another target_wait call. */
2718 async_file_mark ();
2719
2720 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2721 return null_ptid;
2722 }
2723
2724 /* Return non-zero if WAITSTATUS reflects an extended Linux
2725 event. Otherwise, return zero. */
2726
2727 static int
2728 extended_event_reported (const struct target_waitstatus *waitstatus)
2729 {
2730 if (waitstatus == NULL)
2731 return 0;
2732
2733 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2734 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2735 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2736 }
2737
2738 /* Wait for an event from a child process, and return its status. */
2739
2740 static ptid_t
2741 linux_wait_1 (ptid_t ptid,
2742 struct target_waitstatus *ourstatus, int target_options)
2743 {
2744 int w;
2745 struct lwp_info *event_child;
2746 int options;
2747 int pid;
2748 int step_over_finished;
2749 int bp_explains_trap;
2750 int maybe_internal_trap;
2751 int report_to_gdb;
2752 int trace_event;
2753 int in_step_range;
2754
2755 if (debug_threads)
2756 {
2757 debug_enter ();
2758 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2759 }
2760
2761 /* Translate generic target options into linux options. */
2762 options = __WALL;
2763 if (target_options & TARGET_WNOHANG)
2764 options |= WNOHANG;
2765
2766 bp_explains_trap = 0;
2767 trace_event = 0;
2768 in_step_range = 0;
2769 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2770
2771 if (ptid_equal (step_over_bkpt, null_ptid))
2772 pid = linux_wait_for_event (ptid, &w, options);
2773 else
2774 {
2775 if (debug_threads)
2776 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2777 target_pid_to_str (step_over_bkpt));
2778 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2779 }
2780
2781 if (pid == 0)
2782 {
2783 gdb_assert (target_options & TARGET_WNOHANG);
2784
2785 if (debug_threads)
2786 {
2787 debug_printf ("linux_wait_1 ret = null_ptid, "
2788 "TARGET_WAITKIND_IGNORE\n");
2789 debug_exit ();
2790 }
2791
2792 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2793 return null_ptid;
2794 }
2795 else if (pid == -1)
2796 {
2797 if (debug_threads)
2798 {
2799 debug_printf ("linux_wait_1 ret = null_ptid, "
2800 "TARGET_WAITKIND_NO_RESUMED\n");
2801 debug_exit ();
2802 }
2803
2804 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2805 return null_ptid;
2806 }
2807
2808 event_child = get_thread_lwp (current_thread);
2809
2810 /* linux_wait_for_event only returns an exit status for the last
2811 child of a process. Report it. */
2812 if (WIFEXITED (w) || WIFSIGNALED (w))
2813 {
2814 if (WIFEXITED (w))
2815 {
2816 ourstatus->kind = TARGET_WAITKIND_EXITED;
2817 ourstatus->value.integer = WEXITSTATUS (w);
2818
2819 if (debug_threads)
2820 {
2821 debug_printf ("linux_wait_1 ret = %s, exited with "
2822 "retcode %d\n",
2823 target_pid_to_str (ptid_of (current_thread)),
2824 WEXITSTATUS (w));
2825 debug_exit ();
2826 }
2827 }
2828 else
2829 {
2830 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2831 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2832
2833 if (debug_threads)
2834 {
2835 debug_printf ("linux_wait_1 ret = %s, terminated with "
2836 "signal %d\n",
2837 target_pid_to_str (ptid_of (current_thread)),
2838 WTERMSIG (w));
2839 debug_exit ();
2840 }
2841 }
2842
2843 return ptid_of (current_thread);
2844 }
2845
2846 /* If step-over executes a breakpoint instruction, it means a
2847 gdb/gdbserver breakpoint had been planted on top of a permanent
2848 breakpoint. The PC has been adjusted by
2849 check_stopped_by_breakpoint to point at the breakpoint address.
2850 Advance the PC manually past the breakpoint, otherwise the
2851 program would keep trapping the permanent breakpoint forever. */
2852 if (!ptid_equal (step_over_bkpt, null_ptid)
2853 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2854 {
2855 unsigned int increment_pc = the_low_target.breakpoint_len;
2856
2857 if (debug_threads)
2858 {
2859 debug_printf ("step-over for %s executed software breakpoint\n",
2860 target_pid_to_str (ptid_of (current_thread)));
2861 }
2862
2863 if (increment_pc != 0)
2864 {
2865 struct regcache *regcache
2866 = get_thread_regcache (current_thread, 1);
2867
2868 event_child->stop_pc += increment_pc;
2869 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2870
2871 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2872 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2873 }
2874 }
2875
2876 /* If this event was not handled before, and is not a SIGTRAP, we
2877 report it. SIGILL and SIGSEGV are also treated as traps in case
2878 a breakpoint is inserted at the current PC. If this target does
2879 not support internal breakpoints at all, we also report the
2880 SIGTRAP without further processing; it's of no concern to us. */
2881 maybe_internal_trap
2882 = (supports_breakpoints ()
2883 && (WSTOPSIG (w) == SIGTRAP
2884 || ((WSTOPSIG (w) == SIGILL
2885 || WSTOPSIG (w) == SIGSEGV)
2886 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2887
2888 if (maybe_internal_trap)
2889 {
2890 /* Handle anything that requires bookkeeping before deciding to
2891 report the event or continue waiting. */
2892
2893 /* First check if we can explain the SIGTRAP with an internal
2894 breakpoint, or if we should possibly report the event to GDB.
2895 Do this before anything that may remove or insert a
2896 breakpoint. */
2897 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2898
2899 /* We have a SIGTRAP, possibly a step-over dance has just
2900 finished. If so, tweak the state machine accordingly,
2901 reinsert breakpoints and delete any reinsert (software
2902 single-step) breakpoints. */
2903 step_over_finished = finish_step_over (event_child);
2904
2905 /* Now invoke the callbacks of any internal breakpoints there. */
2906 check_breakpoints (event_child->stop_pc);
2907
2908 /* Handle tracepoint data collecting. This may overflow the
2909 trace buffer, and cause a tracing stop, removing
2910 breakpoints. */
2911 trace_event = handle_tracepoints (event_child);
2912
2913 if (bp_explains_trap)
2914 {
2915 /* If we stepped or ran into an internal breakpoint, we've
2916 already handled it. So next time we resume (from this
2917 PC), we should step over it. */
2918 if (debug_threads)
2919 debug_printf ("Hit a gdbserver breakpoint.\n");
2920
2921 if (breakpoint_here (event_child->stop_pc))
2922 event_child->need_step_over = 1;
2923 }
2924 }
2925 else
2926 {
2927 /* We have some other signal, possibly a step-over dance was in
2928 progress, and it should be cancelled too. */
2929 step_over_finished = finish_step_over (event_child);
2930 }
2931
2932 /* We have all the data we need. Either report the event to GDB, or
2933 resume threads and keep waiting for more. */
2934
2935 /* If we're collecting a fast tracepoint, finish the collection and
2936 move out of the jump pad before delivering a signal. See
2937 linux_stabilize_threads. */
2938
2939 if (WIFSTOPPED (w)
2940 && WSTOPSIG (w) != SIGTRAP
2941 && supports_fast_tracepoints ()
2942 && agent_loaded_p ())
2943 {
2944 if (debug_threads)
2945 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2946 "to defer or adjust it.\n",
2947 WSTOPSIG (w), lwpid_of (current_thread));
2948
2949 /* Allow debugging the jump pad itself. */
2950 if (current_thread->last_resume_kind != resume_step
2951 && maybe_move_out_of_jump_pad (event_child, &w))
2952 {
2953 enqueue_one_deferred_signal (event_child, &w);
2954
2955 if (debug_threads)
2956 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2957 WSTOPSIG (w), lwpid_of (current_thread));
2958
2959 linux_resume_one_lwp (event_child, 0, 0, NULL);
2960
2961 return ignore_event (ourstatus);
2962 }
2963 }
2964
2965 if (event_child->collecting_fast_tracepoint)
2966 {
2967 if (debug_threads)
2968 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2969 "Check if we're already there.\n",
2970 lwpid_of (current_thread),
2971 event_child->collecting_fast_tracepoint);
2972
2973 trace_event = 1;
2974
2975 event_child->collecting_fast_tracepoint
2976 = linux_fast_tracepoint_collecting (event_child, NULL);
2977
2978 if (event_child->collecting_fast_tracepoint != 1)
2979 {
2980 /* No longer need this breakpoint. */
2981 if (event_child->exit_jump_pad_bkpt != NULL)
2982 {
2983 if (debug_threads)
2984 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2985 "stopping all threads momentarily.\n");
2986
2987 /* Other running threads could hit this breakpoint.
2988 We don't handle moribund locations like GDB does,
2989 instead we always pause all threads when removing
2990 breakpoints, so that any step-over or
2991 decr_pc_after_break adjustment is always taken
2992 care of while the breakpoint is still
2993 inserted. */
2994 stop_all_lwps (1, event_child);
2995
2996 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2997 event_child->exit_jump_pad_bkpt = NULL;
2998
2999 unstop_all_lwps (1, event_child);
3000
3001 gdb_assert (event_child->suspended >= 0);
3002 }
3003 }
3004
3005 if (event_child->collecting_fast_tracepoint == 0)
3006 {
3007 if (debug_threads)
3008 debug_printf ("fast tracepoint finished "
3009 "collecting successfully.\n");
3010
3011 /* We may have a deferred signal to report. */
3012 if (dequeue_one_deferred_signal (event_child, &w))
3013 {
3014 if (debug_threads)
3015 debug_printf ("dequeued one signal.\n");
3016 }
3017 else
3018 {
3019 if (debug_threads)
3020 debug_printf ("no deferred signals.\n");
3021
3022 if (stabilizing_threads)
3023 {
3024 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3025 ourstatus->value.sig = GDB_SIGNAL_0;
3026
3027 if (debug_threads)
3028 {
3029 debug_printf ("linux_wait_1 ret = %s, stopped "
3030 "while stabilizing threads\n",
3031 target_pid_to_str (ptid_of (current_thread)));
3032 debug_exit ();
3033 }
3034
3035 return ptid_of (current_thread);
3036 }
3037 }
3038 }
3039 }
3040
3041 /* Check whether GDB would be interested in this event. */
3042
3043 /* If GDB is not interested in this signal, don't stop other
3044 threads, and don't report it to GDB. Just resume the inferior
3045 right away. We do this for threading-related signals as well as
3046 any that GDB specifically requested we ignore. But never ignore
3047 SIGSTOP if we sent it ourselves, and do not ignore signals when
3048 stepping - they may require special handling to skip the signal
3049 handler. Also never ignore signals that could be caused by a
3050 breakpoint. */
3051 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3052 thread library? */
3053 if (WIFSTOPPED (w)
3054 && current_thread->last_resume_kind != resume_step
3055 && (
3056 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3057 (current_process ()->priv->thread_db != NULL
3058 && (WSTOPSIG (w) == __SIGRTMIN
3059 || WSTOPSIG (w) == __SIGRTMIN + 1))
3060 ||
3061 #endif
3062 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3063 && !(WSTOPSIG (w) == SIGSTOP
3064 && current_thread->last_resume_kind == resume_stop)
3065 && !linux_wstatus_maybe_breakpoint (w))))
3066 {
3067 siginfo_t info, *info_p;
3068
3069 if (debug_threads)
3070 debug_printf ("Ignored signal %d for LWP %ld.\n",
3071 WSTOPSIG (w), lwpid_of (current_thread));
3072
3073 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3074 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3075 info_p = &info;
3076 else
3077 info_p = NULL;
3078 linux_resume_one_lwp (event_child, event_child->stepping,
3079 WSTOPSIG (w), info_p);
3080 return ignore_event (ourstatus);
3081 }
3082
3083 /* Note that all addresses are always "out of the step range" when
3084 there's no range to begin with. */
3085 in_step_range = lwp_in_step_range (event_child);
3086
3087 /* If GDB wanted this thread to single step, and the thread is out
3088 of the step range, we always want to report the SIGTRAP, and let
3089 GDB handle it. Watchpoints should always be reported. So should
3090 signals we can't explain. A SIGTRAP we can't explain could be a
3091 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3092 do, we're able to handle GDB breakpoints on top of internal
3093 breakpoints, by handling the internal breakpoint and still
3094 reporting the event to GDB. If we don't, we're out of luck, GDB
3095 won't see the breakpoint hit. */
3096 report_to_gdb = (!maybe_internal_trap
3097 || (current_thread->last_resume_kind == resume_step
3098 && !in_step_range)
3099 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3100 || (!step_over_finished && !in_step_range
3101 && !bp_explains_trap && !trace_event)
3102 || (gdb_breakpoint_here (event_child->stop_pc)
3103 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3104 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3105 || extended_event_reported (&event_child->waitstatus));
3106
3107 run_breakpoint_commands (event_child->stop_pc);
3108
3109 /* We found no reason GDB would want us to stop. We either hit one
3110 of our own breakpoints, or finished an internal step GDB
3111 shouldn't know about. */
3112 if (!report_to_gdb)
3113 {
3114 if (debug_threads)
3115 {
3116 if (bp_explains_trap)
3117 debug_printf ("Hit a gdbserver breakpoint.\n");
3118 if (step_over_finished)
3119 debug_printf ("Step-over finished.\n");
3120 if (trace_event)
3121 debug_printf ("Tracepoint event.\n");
3122 if (lwp_in_step_range (event_child))
3123 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3124 paddress (event_child->stop_pc),
3125 paddress (event_child->step_range_start),
3126 paddress (event_child->step_range_end));
3127 if (extended_event_reported (&event_child->waitstatus))
3128 {
3129 char *str = target_waitstatus_to_string (&event_child->waitstatus);
3130 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3131 lwpid_of (get_lwp_thread (event_child)), str);
3132 xfree (str);
3133 }
3134 }
3135
3136 /* We're not reporting this breakpoint to GDB, so apply the
3137 decr_pc_after_break adjustment to the inferior's regcache
3138 ourselves. */
3139
3140 if (the_low_target.set_pc != NULL)
3141 {
3142 struct regcache *regcache
3143 = get_thread_regcache (current_thread, 1);
3144 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3145 }
3146
3147 /* We may have finished stepping over a breakpoint. If so,
3148 we've stopped and suspended all LWPs momentarily except the
3149 stepping one. This is where we resume them all again. We're
3150 going to keep waiting, so use proceed, which handles stepping
3151 over the next breakpoint. */
3152 if (debug_threads)
3153 debug_printf ("proceeding all threads.\n");
3154
3155 if (step_over_finished)
3156 unsuspend_all_lwps (event_child);
3157
3158 proceed_all_lwps ();
3159 return ignore_event (ourstatus);
3160 }
3161
3162 if (debug_threads)
3163 {
3164 if (current_thread->last_resume_kind == resume_step)
3165 {
3166 if (event_child->step_range_start == event_child->step_range_end)
3167 debug_printf ("GDB wanted to single-step, reporting event.\n");
3168 else if (!lwp_in_step_range (event_child))
3169 debug_printf ("Out of step range, reporting event.\n");
3170 }
3171 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3172 debug_printf ("Stopped by watchpoint.\n");
3173 else if (gdb_breakpoint_here (event_child->stop_pc))
3174 debug_printf ("Stopped by GDB breakpoint.\n");
3175 debug_printf ("Hit a non-gdbserver trap event.\n");
3177 }
3178
3179 /* Alright, we're going to report a stop. */
3180
3181 if (!stabilizing_threads)
3182 {
3183 /* In all-stop, stop all threads. */
3184 if (!non_stop)
3185 stop_all_lwps (0, NULL);
3186
3187 /* If we're not waiting for a specific LWP, choose an event LWP
3188 from among those that have had events. Giving equal priority
3189 to all LWPs that have had events helps prevent
3190 starvation. */
3191 if (ptid_equal (ptid, minus_one_ptid))
3192 {
3193 event_child->status_pending_p = 1;
3194 event_child->status_pending = w;
3195
3196 select_event_lwp (&event_child);
3197
3198 /* current_thread and event_child must stay in sync. */
3199 current_thread = get_lwp_thread (event_child);
3200
3201 event_child->status_pending_p = 0;
3202 w = event_child->status_pending;
3203 }
3204
3205 if (step_over_finished)
3206 {
3207 if (!non_stop)
3208 {
3209 /* If we were doing a step-over, all other threads but
3210 the stepping one had been paused in start_step_over,
3211 with their suspend counts incremented. We don't want
3212 to do a full unstop/unpause, because we're in
3213 all-stop mode (so we want threads stopped), but we
3214 still need to unsuspend the other threads, to
3215 decrement their `suspended' count back. */
3216 unsuspend_all_lwps (event_child);
3217 }
3218 else
3219 {
3220 /* If we just finished a step-over, then all threads had
3221 been momentarily paused. In all-stop, that's fine,
3222 we want threads stopped by now anyway. In non-stop,
3223 we need to re-resume threads that GDB wanted to be
3224 running. */
3225 unstop_all_lwps (1, event_child);
3226 }
3227 }
3228
3229 /* Stabilize threads (move out of jump pads). */
3230 if (!non_stop)
3231 stabilize_threads ();
3232 }
3233 else
3234 {
3235 /* If we just finished a step-over, then all threads had been
3236 momentarily paused. In all-stop, that's fine, we want
3237 threads stopped by now anyway. In non-stop, we need to
3238 re-resume threads that GDB wanted to be running. */
3239 if (step_over_finished)
3240 unstop_all_lwps (1, event_child);
3241 }
3242
3243 if (extended_event_reported (&event_child->waitstatus))
3244 {
3245 /* If the reported event is a fork, vfork or vfork-done, let GDB know. */
3246 ourstatus->kind = event_child->waitstatus.kind;
3247 ourstatus->value = event_child->waitstatus.value;
3248
3249 /* Clear the event lwp's waitstatus since we handled it already. */
3250 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3251 }
3252 else
3253 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3254
3255 /* Now that we've selected our final event LWP, un-adjust its PC if
3256 it was a software breakpoint, and the client doesn't know we can
3257 adjust the breakpoint ourselves. */
3258 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3259 && !swbreak_feature)
3260 {
3261 int decr_pc = the_low_target.decr_pc_after_break;
3262
3263 if (decr_pc != 0)
3264 {
3265 struct regcache *regcache
3266 = get_thread_regcache (current_thread, 1);
3267 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3268 }
3269 }
3270
3271 if (current_thread->last_resume_kind == resume_stop
3272 && WSTOPSIG (w) == SIGSTOP)
3273 {
3274 /* A thread that has been requested to stop by GDB with vCont;t
3275 stopped cleanly, so report it as SIG0. The use of
3276 SIGSTOP is an implementation detail. */
3277 ourstatus->value.sig = GDB_SIGNAL_0;
3278 }
3279 else if (current_thread->last_resume_kind == resume_stop
3280 && WSTOPSIG (w) != SIGSTOP)
3281 {
3282 /* A thread that has been requested to stop by GDB with vCont;t,
3283 but it stopped for other reasons. */
3284 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3285 }
3286 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3287 {
3288 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3289 }
3290
3291 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3292
3293 if (debug_threads)
3294 {
3295 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3296 target_pid_to_str (ptid_of (current_thread)),
3297 ourstatus->kind, ourstatus->value.sig);
3298 debug_exit ();
3299 }
3300
3301 return ptid_of (current_thread);
3302 }
3303
3304 /* Get rid of any pending event in the pipe. */
3305 static void
3306 async_file_flush (void)
3307 {
3308 int ret;
3309 char buf;
3310
3311 do
3312 ret = read (linux_event_pipe[0], &buf, 1);
3313 while (ret >= 0 || (ret == -1 && errno == EINTR));
3314 }
3315
3316 /* Put something in the pipe, so the event loop wakes up. */
3317 static void
3318 async_file_mark (void)
3319 {
3320 int ret;
3321
3322 async_file_flush ();
3323
3324 do
3325 ret = write (linux_event_pipe[1], "+", 1);
3326 while (ret == 0 || (ret == -1 && errno == EINTR));
3327
3328 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3329 be awakened anyway. */
3330 }
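
/* Editorial sketch (not part of gdbserver): the two helpers above
   implement the classic self-pipe trick, which only works if both
   ends are non-blocking, so that async_file_flush can drain without
   hanging and async_file_mark can ignore a full pipe.  The pipe is
   created elsewhere in this file; conceptually the setup is: */

static int
example_setup_event_pipe (void)
{
  if (pipe (linux_event_pipe) != 0)
    return -1;

  /* Both ends non-blocking: reads stop with EAGAIN when the pipe is
     drained, writes stop with EAGAIN when it is full.  */
  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}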
3331
3332 static ptid_t
3333 linux_wait (ptid_t ptid,
3334 struct target_waitstatus *ourstatus, int target_options)
3335 {
3336 ptid_t event_ptid;
3337
3338 /* Flush the async file first. */
3339 if (target_is_async_p ())
3340 async_file_flush ();
3341
3342 do
3343 {
3344 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3345 }
3346 while ((target_options & TARGET_WNOHANG) == 0
3347 && ptid_equal (event_ptid, null_ptid)
3348 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3349
3350 /* If at least one stop was reported, there may be more. A single
3351 SIGCHLD can signal more than one child stop. */
3352 if (target_is_async_p ()
3353 && (target_options & TARGET_WNOHANG) != 0
3354 && !ptid_equal (event_ptid, null_ptid))
3355 async_file_mark ();
3356
3357 return event_ptid;
3358 }
3359
3360 /* Send a signal to an LWP. */
3361
3362 static int
3363 kill_lwp (unsigned long lwpid, int signo)
3364 {
3365 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3366 fails, then we are not using nptl threads and we should be using kill. */
3367
3368 #ifdef __NR_tkill
3369 {
3370 static int tkill_failed;
3371
3372 if (!tkill_failed)
3373 {
3374 int ret;
3375
3376 errno = 0;
3377 ret = syscall (__NR_tkill, lwpid, signo);
3378 if (errno != ENOSYS)
3379 return ret;
3380 tkill_failed = 1;
3381 }
3382 }
3383 #endif
3384
3385 return kill (lwpid, signo);
3386 }
3387
3388 void
3389 linux_stop_lwp (struct lwp_info *lwp)
3390 {
3391 send_sigstop (lwp);
3392 }
3393
3394 static void
3395 send_sigstop (struct lwp_info *lwp)
3396 {
3397 int pid;
3398
3399 pid = lwpid_of (get_lwp_thread (lwp));
3400
3401 /* If we already have a pending stop signal for this process, don't
3402 send another. */
3403 if (lwp->stop_expected)
3404 {
3405 if (debug_threads)
3406 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3407
3408 return;
3409 }
3410
3411 if (debug_threads)
3412 debug_printf ("Sending sigstop to lwp %d\n", pid);
3413
3414 lwp->stop_expected = 1;
3415 kill_lwp (pid, SIGSTOP);
3416 }
3417
3418 static int
3419 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3420 {
3421 struct thread_info *thread = (struct thread_info *) entry;
3422 struct lwp_info *lwp = get_thread_lwp (thread);
3423
3424 /* Ignore EXCEPT. */
3425 if (lwp == except)
3426 return 0;
3427
3428 if (lwp->stopped)
3429 return 0;
3430
3431 send_sigstop (lwp);
3432 return 0;
3433 }
3434
3435 /* Increment the suspend count of an LWP, and stop it, if not stopped
3436 yet. */
3437 static int
3438 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3439 void *except)
3440 {
3441 struct thread_info *thread = (struct thread_info *) entry;
3442 struct lwp_info *lwp = get_thread_lwp (thread);
3443
3444 /* Ignore EXCEPT. */
3445 if (lwp == except)
3446 return 0;
3447
3448 lwp->suspended++;
3449
3450 return send_sigstop_callback (entry, except);
3451 }
3452
3453 static void
3454 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3455 {
3456 /* It's dead, really. */
3457 lwp->dead = 1;
3458
3459 /* Store the exit status for later. */
3460 lwp->status_pending_p = 1;
3461 lwp->status_pending = wstat;
3462
3463 /* Prevent trying to stop it. */
3464 lwp->stopped = 1;
3465
3466 /* No further stops are expected from a dead lwp. */
3467 lwp->stop_expected = 0;
3468 }
3469
3470 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3471
3472 static void
3473 wait_for_sigstop (void)
3474 {
3475 struct thread_info *saved_thread;
3476 ptid_t saved_tid;
3477 int wstat;
3478 int ret;
3479
3480 saved_thread = current_thread;
3481 if (saved_thread != NULL)
3482 saved_tid = saved_thread->entry.id;
3483 else
3484 saved_tid = null_ptid; /* avoid bogus unused warning */
3485
3486 if (debug_threads)
3487 debug_printf ("wait_for_sigstop: pulling events\n");
3488
3489 /* Passing NULL_PTID as filter indicates we want all events to be
3490 left pending. Eventually this returns when there are no
3491 unwaited-for children left. */
3492 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3493 &wstat, __WALL);
3494 gdb_assert (ret == -1);
3495
3496 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3497 current_thread = saved_thread;
3498 else
3499 {
3500 if (debug_threads)
3501 debug_printf ("Previously current thread died.\n");
3502
3503 if (non_stop)
3504 {
3505 /* We can't change the current inferior behind GDB's back,
3506 otherwise, a subsequent command may apply to the wrong
3507 process. */
3508 current_thread = NULL;
3509 }
3510 else
3511 {
3512 /* Set a valid thread as current. */
3513 set_desired_thread (0);
3514 }
3515 }
3516 }
3517
3518 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3519 move it out, because we need to report the stop event to GDB. For
3520 example, if the user puts a breakpoint in the jump pad, it's
3521 because she wants to debug it. */
3522
3523 static int
3524 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3525 {
3526 struct thread_info *thread = (struct thread_info *) entry;
3527 struct lwp_info *lwp = get_thread_lwp (thread);
3528
3529 gdb_assert (lwp->suspended == 0);
3530 gdb_assert (lwp->stopped);
3531
3532 /* Allow debugging the jump pad, gdb_collect, etc.. */
3533 return (supports_fast_tracepoints ()
3534 && agent_loaded_p ()
3535 && (gdb_breakpoint_here (lwp->stop_pc)
3536 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3537 || thread->last_resume_kind == resume_step)
3538 && linux_fast_tracepoint_collecting (lwp, NULL));
3539 }
3540
3541 static void
3542 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3543 {
3544 struct thread_info *thread = (struct thread_info *) entry;
3545 struct lwp_info *lwp = get_thread_lwp (thread);
3546 int *wstat;
3547
3548 gdb_assert (lwp->suspended == 0);
3549 gdb_assert (lwp->stopped);
3550
3551 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3552
3553 /* Allow debugging the jump pad, gdb_collect, etc. */
3554 if (!gdb_breakpoint_here (lwp->stop_pc)
3555 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3556 && thread->last_resume_kind != resume_step
3557 && maybe_move_out_of_jump_pad (lwp, wstat))
3558 {
3559 if (debug_threads)
3560 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3561 lwpid_of (thread));
3562
3563 if (wstat)
3564 {
3565 lwp->status_pending_p = 0;
3566 enqueue_one_deferred_signal (lwp, wstat);
3567
3568 if (debug_threads)
3569 debug_printf ("Signal %d for LWP %ld deferred "
3570 "(in jump pad)\n",
3571 WSTOPSIG (*wstat), lwpid_of (thread));
3572 }
3573
3574 linux_resume_one_lwp (lwp, 0, 0, NULL);
3575 }
3576 else
3577 lwp->suspended++;
3578 }
3579
3580 static int
3581 lwp_running (struct inferior_list_entry *entry, void *data)
3582 {
3583 struct thread_info *thread = (struct thread_info *) entry;
3584 struct lwp_info *lwp = get_thread_lwp (thread);
3585
3586 if (lwp->dead)
3587 return 0;
3588 if (lwp->stopped)
3589 return 0;
3590 return 1;
3591 }
3592
3593 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3594 If SUSPEND, then also increase the suspend count of every LWP,
3595 except EXCEPT. */
3596
3597 static void
3598 stop_all_lwps (int suspend, struct lwp_info *except)
3599 {
3600 /* Should not be called recursively. */
3601 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3602
3603 if (debug_threads)
3604 {
3605 debug_enter ();
3606 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3607 suspend ? "stop-and-suspend" : "stop",
3608 except != NULL
3609 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3610 : "none");
3611 }
3612
3613 stopping_threads = (suspend
3614 ? STOPPING_AND_SUSPENDING_THREADS
3615 : STOPPING_THREADS);
3616
3617 if (suspend)
3618 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3619 else
3620 find_inferior (&all_threads, send_sigstop_callback, except);
3621 wait_for_sigstop ();
3622 stopping_threads = NOT_STOPPING_THREADS;
3623
3624 if (debug_threads)
3625 {
3626 debug_printf ("stop_all_lwps done, setting stopping_threads "
3627 "back to !stopping\n");
3628 debug_exit ();
3629 }
3630 }
3631
3632 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3633 SIGNAL is nonzero, give it that signal. */
3634
3635 static void
3636 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3637 int step, int signal, siginfo_t *info)
3638 {
3639 struct thread_info *thread = get_lwp_thread (lwp);
3640 struct thread_info *saved_thread;
3641 int fast_tp_collecting;
3642
3643 if (lwp->stopped == 0)
3644 return;
3645
3646 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3647
3648 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3649
3650 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3651 user used the "jump" command, or "set $pc = foo"). */
3652 if (lwp->stop_pc != get_pc (lwp))
3653 {
3654 /* Collecting 'while-stepping' actions doesn't make sense
3655 anymore. */
3656 release_while_stepping_state_list (thread);
3657 }
3658
3659 /* If we have pending signals or status, and a new signal, enqueue the
3660 signal. Also enqueue the signal if we are waiting to reinsert a
3661 breakpoint; it will be picked up again below. */
3662 if (signal != 0
3663 && (lwp->status_pending_p
3664 || lwp->pending_signals != NULL
3665 || lwp->bp_reinsert != 0
3666 || fast_tp_collecting))
3667 {
3668 struct pending_signals *p_sig;
3669 p_sig = xmalloc (sizeof (*p_sig));
3670 p_sig->prev = lwp->pending_signals;
3671 p_sig->signal = signal;
3672 if (info == NULL)
3673 memset (&p_sig->info, 0, sizeof (siginfo_t));
3674 else
3675 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3676 lwp->pending_signals = p_sig;
3677 }
3678
3679 if (lwp->status_pending_p)
3680 {
3681 if (debug_threads)
3682 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3683 " has pending status\n",
3684 lwpid_of (thread), step ? "step" : "continue", signal,
3685 lwp->stop_expected ? "expected" : "not expected");
3686 return;
3687 }
3688
3689 saved_thread = current_thread;
3690 current_thread = thread;
3691
3692 if (debug_threads)
3693 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3694 lwpid_of (thread), step ? "step" : "continue", signal,
3695 lwp->stop_expected ? "expected" : "not expected");
3696
3697 /* This bit needs some thinking about. If we get a signal that
3698 we must report while a single-step reinsert is still pending,
3699 we often end up resuming the thread. It might be better to
3700 (ew) allow a stack of pending events; then we could be sure that
3701 the reinsert happened right away and not lose any signals.
3702
3703 Making this stack would also shrink the window in which breakpoints are
3704 uninserted (see comment in linux_wait_for_lwp) but not enough for
3705 complete correctness, so it won't solve that problem. It may be
3706 worthwhile just to solve this one, however. */
3707 if (lwp->bp_reinsert != 0)
3708 {
3709 if (debug_threads)
3710 debug_printf (" pending reinsert at 0x%s\n",
3711 paddress (lwp->bp_reinsert));
3712
3713 if (can_hardware_single_step ())
3714 {
3715 if (fast_tp_collecting == 0)
3716 {
3717 if (step == 0)
3718 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3719 if (lwp->suspended)
3720 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3721 lwp->suspended);
3722 }
3723
3724 step = 1;
3725 }
3726
3727 /* Postpone any pending signal. It was enqueued above. */
3728 signal = 0;
3729 }
3730
3731 if (fast_tp_collecting == 1)
3732 {
3733 if (debug_threads)
3734 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3735 " (exit-jump-pad-bkpt)\n",
3736 lwpid_of (thread));
3737
3738 /* Postpone any pending signal. It was enqueued above. */
3739 signal = 0;
3740 }
3741 else if (fast_tp_collecting == 2)
3742 {
3743 if (debug_threads)
3744 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3745 " single-stepping\n",
3746 lwpid_of (thread));
3747
3748 if (can_hardware_single_step ())
3749 step = 1;
3750 else
3751 {
3752 internal_error (__FILE__, __LINE__,
3753 "moving out of jump pad single-stepping"
3754 " not implemented on this target");
3755 }
3756
3757 /* Postpone any pending signal. It was enqueued above. */
3758 signal = 0;
3759 }
3760
3761 /* If we have while-stepping actions in this thread, set it stepping.
3762 If we have a signal to deliver, its handler may or may not be set
3763 to SIG_IGN; we don't know.  Assume it is not ignored, and allow
3764 collecting while-stepping into a signal handler.  A possible smart thing to
3765 do would be to set an internal breakpoint at the signal return
3766 address, continue, and carry on catching this while-stepping
3767 action only when that breakpoint is hit. A future
3768 enhancement. */
3769 if (thread->while_stepping != NULL
3770 && can_hardware_single_step ())
3771 {
3772 if (debug_threads)
3773 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3774 lwpid_of (thread));
3775 step = 1;
3776 }
3777
3778 if (the_low_target.get_pc != NULL)
3779 {
3780 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3781
3782 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3783
3784 if (debug_threads)
3785 {
3786 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3787 (long) lwp->stop_pc);
3788 }
3789 }
3790
3791 /* If we have pending signals, consume one unless we are trying to
3792 reinsert a breakpoint or we're trying to finish a fast tracepoint
3793 collect. */
3794 if (lwp->pending_signals != NULL
3795 && lwp->bp_reinsert == 0
3796 && fast_tp_collecting == 0)
3797 {
3798 struct pending_signals **p_sig;
3799
3800 p_sig = &lwp->pending_signals;
3801 while ((*p_sig)->prev != NULL)
3802 p_sig = &(*p_sig)->prev;
3803
3804 signal = (*p_sig)->signal;
3805 if ((*p_sig)->info.si_signo != 0)
3806 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3807 &(*p_sig)->info);
3808
3809 free (*p_sig);
3810 *p_sig = NULL;
3811 }
3812
3813 if (the_low_target.prepare_to_resume != NULL)
3814 the_low_target.prepare_to_resume (lwp);
3815
3816 regcache_invalidate_thread (thread);
3817 errno = 0;
3818 lwp->stepping = step;
3819 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3820 (PTRACE_TYPE_ARG3) 0,
3821 /* Coerce to a uintptr_t first to avoid potential gcc warning
3822 of coercing an 8 byte integer to a 4 byte pointer. */
3823 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3824
3825 current_thread = saved_thread;
3826 if (errno)
3827 perror_with_name ("resuming thread");
3828
3829 /* Successfully resumed.  Clear state that no longer makes sense,
3830 and mark the LWP as running.  Must not do this before resuming,
3831 otherwise, if resuming fails, other code will be confused.  E.g., we'd
3832 later try to stop the LWP and hang forever waiting for a stop
3833 status.  Note that we must not throw after this is cleared;
3834 otherwise, handle_zombie_lwp_error would get confused. */
3835 lwp->stopped = 0;
3836 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3837 }
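
/* Illustrative, stand-alone sketch (not part of gdbserver) of the
   pending-signal queue discipline used above: the enqueue path pushes
   new entries at the head of the PREV-linked list, while the consume
   path walks to the tail, so signals are re-delivered in FIFO order.
   Guarded out so it does not affect compilation.  */
#if 0
#include <stdlib.h>

struct pending { int signal; struct pending *prev; };

/* Push SIG at the head of *LIST, as the enqueue path does.  */
static void
push_signal (struct pending **list, int sig)
{
  struct pending *p = malloc (sizeof (*p));

  p->signal = sig;
  p->prev = *list;
  *list = p;
}

/* Pop the oldest entry by walking the PREV links to the tail, as the
   consume path does.  Return -1 if the list is empty.  */
static int
pop_oldest_signal (struct pending **list)
{
  struct pending **p = list;
  int sig;

  if (*p == NULL)
    return -1;
  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif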
3838
3839 /* Called when we try to resume a stopped LWP and that errors out. If
3840 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3841 or about to become), discard the error, clear any pending status
3842 the LWP may have, and return true (we'll collect the exit status
3843 soon enough). Otherwise, return false. */
3844
3845 static int
3846 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3847 {
3848 struct thread_info *thread = get_lwp_thread (lp);
3849
3850 /* If we get an error after resuming the LWP successfully, we'd
3851 confuse !T state for the LWP being gone. */
3852 gdb_assert (lp->stopped);
3853
3854 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3855 because even if ptrace failed with ESRCH, the tracee may be "not
3856 yet fully dead", but already refusing ptrace requests. In that
3857 case the tracee has 'R (Running)' state for a little bit
3858 (observed in Linux 3.18). See also the note on ESRCH in the
3859 ptrace(2) man page. Instead, check whether the LWP has any state
3860 other than ptrace-stopped. */
3861
3862 /* Don't assume anything if /proc/PID/status can't be read. */
3863 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3864 {
3865 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3866 lp->status_pending_p = 0;
3867 return 1;
3868 }
3869 return 0;
3870 }
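
/* Minimal sketch of classifying a tracee's /proc state by hand, in
   the spirit of the check above.  PROC_PID_IS_TRACE_STOPPED is a
   hypothetical helper, not the linux_proc_* API used above; guarded
   out so it does not affect compilation.  */
#if 0
#include <stdio.h>
#include <string.h>

/* Return 1 if PID reports "tracing stop" on the "State:" line of
   /proc/PID/status, 0 if it reports any other state, and -1 if the
   file cannot be read (e.g., the process is fully gone).  */
static int
proc_pid_is_trace_stopped (int pid)
{
  char path[64], line[128];
  FILE *f;
  int result = -1;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	result = strstr (line, "tracing stop") != NULL;
	break;
      }
  fclose (f);
  return result;
}
#endif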
3871
3872 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3873 disappears while we try to resume it. */
3874
3875 static void
3876 linux_resume_one_lwp (struct lwp_info *lwp,
3877 int step, int signal, siginfo_t *info)
3878 {
3879 TRY
3880 {
3881 linux_resume_one_lwp_throw (lwp, step, signal, info);
3882 }
3883 CATCH (ex, RETURN_MASK_ERROR)
3884 {
3885 if (!check_ptrace_stopped_lwp_gone (lwp))
3886 throw_exception (ex);
3887 }
3888 END_CATCH
3889 }
3890
3891 struct thread_resume_array
3892 {
3893 struct thread_resume *resume;
3894 size_t n;
3895 };
3896
3897 /* This function is called once per thread via find_inferior.
3898 ARG is a pointer to a thread_resume_array struct.
3899 We look up the thread specified by ENTRY in ARG, and mark the thread
3900 with a pointer to the appropriate resume request.
3901
3902 This algorithm is O(threads * resume elements), but the number of
3903 resume elements is small (and will remain small at least until GDB
3904 supports thread suspension). */
3905
3906 static int
3907 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3908 {
3909 struct thread_info *thread = (struct thread_info *) entry;
3910 struct lwp_info *lwp = get_thread_lwp (thread);
3911 int ndx;
3912 struct thread_resume_array *r;
3913
3914 r = arg;
3915
3916 for (ndx = 0; ndx < r->n; ndx++)
3917 {
3918 ptid_t ptid = r->resume[ndx].thread;
3919 if (ptid_equal (ptid, minus_one_ptid)
3920 || ptid_equal (ptid, entry->id)
3921 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3922 of PID'. */
3923 || (ptid_get_pid (ptid) == pid_of (thread)
3924 && (ptid_is_pid (ptid)
3925 || ptid_get_lwp (ptid) == -1)))
3926 {
3927 if (r->resume[ndx].kind == resume_stop
3928 && thread->last_resume_kind == resume_stop)
3929 {
3930 if (debug_threads)
3931 debug_printf ("already %s LWP %ld at GDB's request\n",
3932 (thread->last_status.kind
3933 == TARGET_WAITKIND_STOPPED)
3934 ? "stopped"
3935 : "stopping",
3936 lwpid_of (thread));
3937
3938 continue;
3939 }
3940
3941 lwp->resume = &r->resume[ndx];
3942 thread->last_resume_kind = lwp->resume->kind;
3943
3944 lwp->step_range_start = lwp->resume->step_range_start;
3945 lwp->step_range_end = lwp->resume->step_range_end;
3946
3947 /* If we had a deferred signal to report, dequeue one now.
3948 This can happen if LWP gets more than one signal while
3949 trying to get out of a jump pad. */
3950 if (lwp->stopped
3951 && !lwp->status_pending_p
3952 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3953 {
3954 lwp->status_pending_p = 1;
3955
3956 if (debug_threads)
3957 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3958 "leaving status pending.\n",
3959 WSTOPSIG (lwp->status_pending),
3960 lwpid_of (thread));
3961 }
3962
3963 return 0;
3964 }
3965 }
3966
3967 /* No resume action for this thread. */
3968 lwp->resume = NULL;
3969
3970 return 0;
3971 }
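
/* Stand-alone model (hypothetical types and names, not gdbserver's
   ptid API) of the matching rules above: a resume request selects a
   thread if it is the -1 wildcard, an exact match, or one of the
   process-wide 'pPID' / 'pPID.-1' forms.  Guarded out so it does not
   affect compilation.  */
#if 0
struct xptid { int pid; long lwp; };

static int
resume_request_matches (struct xptid request, struct xptid thread)
{
  if (request.pid == -1)
    return 1;				/* minus_one_ptid: everything.  */
  if (request.pid != thread.pid)
    return 0;
  return (request.lwp == thread.lwp	/* Exact thread match.  */
	  || request.lwp == 0		/* 'pPID': all of PID.  */
	  || request.lwp == -1);	/* 'pPID.-1': likewise.  */
}
#endif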
3972
3973 /* find_inferior callback for linux_resume.
3974 Set *FLAG_P if this lwp has an interesting status pending. */
3975
3976 static int
3977 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3978 {
3979 struct thread_info *thread = (struct thread_info *) entry;
3980 struct lwp_info *lwp = get_thread_lwp (thread);
3981
3982 /* LWPs which will not be resumed are not interesting, because
3983 we might not wait for them next time through linux_wait. */
3984 if (lwp->resume == NULL)
3985 return 0;
3986
3987 if (thread_still_has_status_pending_p (thread))
3988 * (int *) flag_p = 1;
3989
3990 return 0;
3991 }
3992
3993 /* Return 1 if this lwp that GDB wants running is stopped at an
3994 internal breakpoint that we need to step over. It assumes that any
3995 required STOP_PC adjustment has already been propagated to the
3996 inferior's regcache. */
3997
3998 static int
3999 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4000 {
4001 struct thread_info *thread = (struct thread_info *) entry;
4002 struct lwp_info *lwp = get_thread_lwp (thread);
4003 struct thread_info *saved_thread;
4004 CORE_ADDR pc;
4005
4006 /* LWPs which will not be resumed are not interesting, because we
4007 might not wait for them next time through linux_wait. */
4008
4009 if (!lwp->stopped)
4010 {
4011 if (debug_threads)
4012 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4013 lwpid_of (thread));
4014 return 0;
4015 }
4016
4017 if (thread->last_resume_kind == resume_stop)
4018 {
4019 if (debug_threads)
4020 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4021 " stopped\n",
4022 lwpid_of (thread));
4023 return 0;
4024 }
4025
4026 gdb_assert (lwp->suspended >= 0);
4027
4028 if (lwp->suspended)
4029 {
4030 if (debug_threads)
4031 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4032 lwpid_of (thread));
4033 return 0;
4034 }
4035
4036 if (!lwp->need_step_over)
4037 {
4038 if (debug_threads)
4039 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4040 return 0;
4041 }
4041
4042 if (lwp->status_pending_p)
4043 {
4044 if (debug_threads)
4045 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4046 " status.\n",
4047 lwpid_of (thread));
4048 return 0;
4049 }
4050
4051 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4052 or we have. */
4053 pc = get_pc (lwp);
4054
4055 /* If the PC has changed since we stopped, then don't do anything,
4056 and let the breakpoint/tracepoint be hit.  This happens if, for
4057 instance, GDB handled the decr_pc_after_break subtraction itself,
4058 GDB is OOL stepping this thread, the user has issued a "jump"
4059 command, or the user has poked the thread's registers directly. */
4060 if (pc != lwp->stop_pc)
4061 {
4062 if (debug_threads)
4063 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4064 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4065 lwpid_of (thread),
4066 paddress (lwp->stop_pc), paddress (pc));
4067
4068 lwp->need_step_over = 0;
4069 return 0;
4070 }
4071
4072 saved_thread = current_thread;
4073 current_thread = thread;
4074
4075 /* We can only step over breakpoints we know about. */
4076 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4077 {
4078 /* Don't step over a breakpoint that GDB expects to hit,
4079 though.  If the condition is being evaluated on the target's side
4080 and it evaluates to false, step over this breakpoint as well. */
4081 if (gdb_breakpoint_here (pc)
4082 && gdb_condition_true_at_breakpoint (pc)
4083 && gdb_no_commands_at_breakpoint (pc))
4084 {
4085 if (debug_threads)
4086 debug_printf ("Need step over [LWP %ld]? yes, but found"
4087 " GDB breakpoint at 0x%s; skipping step over\n",
4088 lwpid_of (thread), paddress (pc));
4089
4090 current_thread = saved_thread;
4091 return 0;
4092 }
4093 else
4094 {
4095 if (debug_threads)
4096 debug_printf ("Need step over [LWP %ld]? yes, "
4097 "found breakpoint at 0x%s\n",
4098 lwpid_of (thread), paddress (pc));
4099
4100 /* We've found an lwp that needs stepping over --- return 1 so
4101 that find_inferior stops looking. */
4102 current_thread = saved_thread;
4103
4104 /* If the step over is cancelled, this is set again. */
4105 lwp->need_step_over = 0;
4106 return 1;
4107 }
4108 }
4109
4110 current_thread = saved_thread;
4111
4112 if (debug_threads)
4113 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4114 " at 0x%s\n",
4115 lwpid_of (thread), paddress (pc));
4116
4117 return 0;
4118 }
4119
4120 /* Start a step-over operation on LWP.  When LWP stopped at a
4121 breakpoint, to make progress, we need to move the breakpoint out
4122 of the way.  If we let other threads run while we do that, they may
4123 pass by the breakpoint location and miss hitting it.  To avoid
4124 that, a step-over momentarily stops all threads while LWP is
4125 single-stepped with the breakpoint temporarily uninserted from
4126 the inferior.  When the single-step finishes, we reinsert the
4127 breakpoint and let all threads that are supposed to be running
4128 run again.
4129
4130 On targets that don't support hardware single-step, we don't
4131 currently support full software single-stepping. Instead, we only
4132 support stepping over the thread event breakpoint, by asking the
4133 low target where to place a reinsert breakpoint.  Since this
4134 routine assumes the breakpoint being stepped over is a thread event
4135 breakpoint, the low target usually takes the return address of the
4136 current function as a good enough place for the reinsert breakpoint. */
4137
4138 static int
4139 start_step_over (struct lwp_info *lwp)
4140 {
4141 struct thread_info *thread = get_lwp_thread (lwp);
4142 struct thread_info *saved_thread;
4143 CORE_ADDR pc;
4144 int step;
4145
4146 if (debug_threads)
4147 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4148 lwpid_of (thread));
4149
4150 stop_all_lwps (1, lwp);
4151 gdb_assert (lwp->suspended == 0);
4152
4153 if (debug_threads)
4154 debug_printf ("Done stopping all threads for step-over.\n");
4155
4156 /* Note, we should always reach here with an already adjusted PC,
4157 either by GDB (if we're resuming due to GDB's request), or by our
4158 caller, if we just finished handling an internal breakpoint GDB
4159 shouldn't care about. */
4160 pc = get_pc (lwp);
4161
4162 saved_thread = current_thread;
4163 current_thread = thread;
4164
4165 lwp->bp_reinsert = pc;
4166 uninsert_breakpoints_at (pc);
4167 uninsert_fast_tracepoint_jumps_at (pc);
4168
4169 if (can_hardware_single_step ())
4170 {
4171 step = 1;
4172 }
4173 else
4174 {
4175 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4176 set_reinsert_breakpoint (raddr);
4177 step = 0;
4178 }
4179
4180 current_thread = saved_thread;
4181
4182 linux_resume_one_lwp (lwp, step, 0, NULL);
4183
4184 /* Require next event from this LWP. */
4185 step_over_bkpt = thread->entry.id;
4186 return 1;
4187 }
4188
4189 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4190 start_step_over, if still there, and delete any reinsert
4191 breakpoints we've set, on non hardware single-step targets. */
4192
4193 static int
4194 finish_step_over (struct lwp_info *lwp)
4195 {
4196 if (lwp->bp_reinsert != 0)
4197 {
4198 if (debug_threads)
4199 debug_printf ("Finished step over.\n");
4200
4201 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4202 may be no breakpoint to reinsert there by now. */
4203 reinsert_breakpoints_at (lwp->bp_reinsert);
4204 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4205
4206 lwp->bp_reinsert = 0;
4207
4208 /* Delete any software-single-step reinsert breakpoints. No
4209 longer needed. We don't have to worry about other threads
4210 hitting this trap, and later not being able to explain it,
4211 because we were stepping over a breakpoint, and we hold all
4212 threads but LWP stopped while doing that. */
4213 if (!can_hardware_single_step ())
4214 delete_reinsert_breakpoints ();
4215
4216 step_over_bkpt = null_ptid;
4217 return 1;
4218 }
4219 else
4220 return 0;
4221 }
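
/* Compressed, single-threaded model (not gdbserver code) of the
   step-over dance implemented by start_step_over and finish_step_over
   above, assuming hardware single-step.  WRITE_INSN and BREAK_INSN
   are hypothetical stand-ins for the target's breakpoint insertion
   machinery.  Guarded out so it does not affect compilation.  */
#if 0
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static void
step_over_breakpoint (int pid, long addr, long orig_insn)
{
  int status;

  write_insn (pid, addr, orig_insn);		/* Uninsert the breakpoint.  */
  ptrace (PTRACE_SINGLESTEP, pid, NULL, NULL);	/* Move PID past ADDR.  */
  waitpid (pid, &status, 0);			/* Collect the step's stop.  */
  write_insn (pid, addr, BREAK_INSN);		/* Reinsert the breakpoint.  */
}
#endif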
4222
4223 /* This function is called once per thread. We check the thread's resume
4224 request, which will tell us whether to resume, step, or leave the thread
4225 stopped; and what signal, if any, it should be sent.
4226
4227 For threads which we aren't explicitly told otherwise, we preserve
4228 the stepping flag; this is used for stepping over gdbserver-placed
4229 breakpoints.
4230
4231 If pending_flags was set in any thread, we queue any needed
4232 signals, since we won't actually resume. We already have a pending
4233 event to report, so we don't need to preserve any step requests;
4234 they should be re-issued if necessary. */
4235
4236 static int
4237 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4238 {
4239 struct thread_info *thread = (struct thread_info *) entry;
4240 struct lwp_info *lwp = get_thread_lwp (thread);
4241 int step;
4242 int leave_all_stopped = * (int *) arg;
4243 int leave_pending;
4244
4245 if (lwp->resume == NULL)
4246 return 0;
4247
4248 if (lwp->resume->kind == resume_stop)
4249 {
4250 if (debug_threads)
4251 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4252
4253 if (!lwp->stopped)
4254 {
4255 if (debug_threads)
4256 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4257
4258 /* Stop the thread, and wait for the event asynchronously,
4259 through the event loop. */
4260 send_sigstop (lwp);
4261 }
4262 else
4263 {
4264 if (debug_threads)
4265 debug_printf ("already stopped LWP %ld\n",
4266 lwpid_of (thread));
4267
4268 /* The LWP may have been stopped in an internal event that
4269 was not meant to be notified back to GDB (e.g., gdbserver
4270 breakpoint), so we should be reporting a stop event in
4271 this case too. */
4272
4273 /* If the thread already has a pending SIGSTOP, this is a
4274 no-op. Otherwise, something later will presumably resume
4275 the thread and this will cause it to cancel any pending
4276 operation, due to last_resume_kind == resume_stop. If
4277 the thread already has a pending status to report, we
4278 will still report it the next time we wait - see
4279 status_pending_p_callback. */
4280
4281 /* If we already have a pending signal to report, then
4282 there's no need to queue a SIGSTOP, as this means we're
4283 midway through moving the LWP out of the jumppad, and we
4284 will report the pending signal as soon as that is
4285 finished. */
4286 if (lwp->pending_signals_to_report == NULL)
4287 send_sigstop (lwp);
4288 }
4289
4290 /* For stop requests, we're done. */
4291 lwp->resume = NULL;
4292 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4293 return 0;
4294 }
4295
4296 /* If this thread, which is about to be resumed, has a pending status,
4297 then don't resume any threads - we can just report the pending
4298 status.  Make sure to queue any signals that would otherwise be
4299 sent.  In all-stop mode, we make this decision based on whether *any*
4300 thread has a pending status.  If there's a thread that needs the
4301 step-over-breakpoint dance, then don't resume any other thread
4302 but that particular one. */
4303 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4304
4305 if (!leave_pending)
4306 {
4307 if (debug_threads)
4308 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4309
4310 step = (lwp->resume->kind == resume_step);
4311 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4312 }
4313 else
4314 {
4315 if (debug_threads)
4316 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4317
4318 /* If we have a new signal, enqueue the signal. */
4319 if (lwp->resume->sig != 0)
4320 {
4321 struct pending_signals *p_sig;
4322 p_sig = xmalloc (sizeof (*p_sig));
4323 p_sig->prev = lwp->pending_signals;
4324 p_sig->signal = lwp->resume->sig;
4325 memset (&p_sig->info, 0, sizeof (siginfo_t));
4326
4327 /* If this is the same signal we were previously stopped by,
4328 make sure to queue its siginfo. We can ignore the return
4329 value of ptrace; if it fails, we'll skip
4330 PTRACE_SETSIGINFO. */
4331 if (WIFSTOPPED (lwp->last_status)
4332 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4333 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4334 &p_sig->info);
4335
4336 lwp->pending_signals = p_sig;
4337 }
4338 }
4339
4340 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4341 lwp->resume = NULL;
4342 return 0;
4343 }
4344
4345 static void
4346 linux_resume (struct thread_resume *resume_info, size_t n)
4347 {
4348 struct thread_resume_array array = { resume_info, n };
4349 struct thread_info *need_step_over = NULL;
4350 int any_pending;
4351 int leave_all_stopped;
4352
4353 if (debug_threads)
4354 {
4355 debug_enter ();
4356 debug_printf ("linux_resume:\n");
4357 }
4358
4359 find_inferior (&all_threads, linux_set_resume_request, &array);
4360
4361 /* If there is a thread which would otherwise be resumed, which has
4362 a pending status, then don't resume any threads - we can just
4363 report the pending status. Make sure to queue any signals that
4364 would otherwise be sent. In non-stop mode, we'll apply this
4365 logic to each thread individually.  We consume all pending events
4366 before considering starting a step-over (in all-stop). */
4367 any_pending = 0;
4368 if (!non_stop)
4369 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4370
4371 /* If there is a thread which would otherwise be resumed, which is
4372 stopped at a breakpoint that needs stepping over, then don't
4373 resume any threads - have it step over the breakpoint with all
4374 other threads stopped, then resume all threads again. Make sure
4375 to queue any signals that would otherwise be delivered or
4376 queued. */
4377 if (!any_pending && supports_breakpoints ())
4378 need_step_over
4379 = (struct thread_info *) find_inferior (&all_threads,
4380 need_step_over_p, NULL);
4381
4382 leave_all_stopped = (need_step_over != NULL || any_pending);
4383
4384 if (debug_threads)
4385 {
4386 if (need_step_over != NULL)
4387 debug_printf ("Not resuming all, need step over\n");
4388 else if (any_pending)
4389 debug_printf ("Not resuming, all-stop and found "
4390 "an LWP with pending status\n");
4391 else
4392 debug_printf ("Resuming, no pending status or step over needed\n");
4393 }
4394
4395 /* Even if we're leaving threads stopped, queue all signals we'd
4396 otherwise deliver. */
4397 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4398
4399 if (need_step_over)
4400 start_step_over (get_thread_lwp (need_step_over));
4401
4402 if (debug_threads)
4403 {
4404 debug_printf ("linux_resume done\n");
4405 debug_exit ();
4406 }
4407 }
4408
4409 /* This function is called once per thread. We check the thread's
4410 last resume request, which will tell us whether to resume, step, or
4411 leave the thread stopped. Any signal the client requested to be
4412 delivered has already been enqueued at this point.
4413
4414 If any thread that GDB wants running is stopped at an internal
4415 breakpoint that needs stepping over, we start a step-over operation
4416 on that particular thread, and leave all others stopped. */
4417
4418 static int
4419 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4420 {
4421 struct thread_info *thread = (struct thread_info *) entry;
4422 struct lwp_info *lwp = get_thread_lwp (thread);
4423 int step;
4424
4425 if (lwp == except)
4426 return 0;
4427
4428 if (debug_threads)
4429 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4430
4431 if (!lwp->stopped)
4432 {
4433 if (debug_threads)
4434 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4435 return 0;
4436 }
4437
4438 if (thread->last_resume_kind == resume_stop
4439 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4440 {
4441 if (debug_threads)
4442 debug_printf ("   client wants LWP %ld to remain stopped\n",
4443 lwpid_of (thread));
4444 return 0;
4445 }
4446
4447 if (lwp->status_pending_p)
4448 {
4449 if (debug_threads)
4450 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4451 lwpid_of (thread));
4452 return 0;
4453 }
4454
4455 gdb_assert (lwp->suspended >= 0);
4456
4457 if (lwp->suspended)
4458 {
4459 if (debug_threads)
4460 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4461 return 0;
4462 }
4463
4464 if (thread->last_resume_kind == resume_stop
4465 && lwp->pending_signals_to_report == NULL
4466 && lwp->collecting_fast_tracepoint == 0)
4467 {
4468 /* We haven't reported this LWP as stopped yet (otherwise, the
4469 last_status.kind check above would catch it, and we wouldn't
4470 reach here).  This LWP may have been momentarily paused by a
4471 stop_all_lwps call while handling, for example, another LWP's
4472 step-over.  In that case, the pending expected SIGSTOP signal
4473 that was queued at vCont;t handling time will have already
4474 been consumed by wait_for_sigstop, and so we need to requeue
4475 another one here. Note that if the LWP already has a SIGSTOP
4476 pending, this is a no-op. */
4477
4478 if (debug_threads)
4479 debug_printf ("Client wants LWP %ld to stop. "
4480 "Making sure it has a SIGSTOP pending\n",
4481 lwpid_of (thread));
4482
4483 send_sigstop (lwp);
4484 }
4485
4486 step = thread->last_resume_kind == resume_step;
4487 linux_resume_one_lwp (lwp, step, 0, NULL);
4488 return 0;
4489 }
4490
4491 static int
4492 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4493 {
4494 struct thread_info *thread = (struct thread_info *) entry;
4495 struct lwp_info *lwp = get_thread_lwp (thread);
4496
4497 if (lwp == except)
4498 return 0;
4499
4500 lwp->suspended--;
4501 gdb_assert (lwp->suspended >= 0);
4502
4503 return proceed_one_lwp (entry, except);
4504 }
4505
4506 /* When we finish a step-over, set threads running again. If there's
4507 another thread that may need a step-over, now's the time to start
4508 it. Eventually, we'll move all threads past their breakpoints. */
4509
4510 static void
4511 proceed_all_lwps (void)
4512 {
4513 struct thread_info *need_step_over;
4514
4515 /* If there is a thread which would otherwise be resumed, which is
4516 stopped at a breakpoint that needs stepping over, then don't
4517 resume any threads - have it step over the breakpoint with all
4518 other threads stopped, then resume all threads again. */
4519
4520 if (supports_breakpoints ())
4521 {
4522 need_step_over
4523 = (struct thread_info *) find_inferior (&all_threads,
4524 need_step_over_p, NULL);
4525
4526 if (need_step_over != NULL)
4527 {
4528 if (debug_threads)
4529 debug_printf ("proceed_all_lwps: found "
4530 "thread %ld needing a step-over\n",
4531 lwpid_of (need_step_over));
4532
4533 start_step_over (get_thread_lwp (need_step_over));
4534 return;
4535 }
4536 }
4537
4538 if (debug_threads)
4539 debug_printf ("Proceeding, no step-over needed\n");
4540
4541 find_inferior (&all_threads, proceed_one_lwp, NULL);
4542 }
4543
4544 /* Stopped LWPs that the client wanted to be running and that don't
4545 have pending statuses are set to run again, except for EXCEPT, if
4546 not NULL.  This undoes a stop_all_lwps call. */
4547
4548 static void
4549 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4550 {
4551 if (debug_threads)
4552 {
4553 debug_enter ();
4554 if (except)
4555 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4556 lwpid_of (get_lwp_thread (except)));
4557 else
4558 debug_printf ("unstopping all lwps\n");
4559 }
4560
4561 if (unsuspend)
4562 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4563 else
4564 find_inferior (&all_threads, proceed_one_lwp, except);
4565
4566 if (debug_threads)
4567 {
4568 debug_printf ("unstop_all_lwps done\n");
4569 debug_exit ();
4570 }
4571 }
4572
4573
4574 #ifdef HAVE_LINUX_REGSETS
4575
4576 #define use_linux_regsets 1
4577
4578 /* Returns true if REGSET has been disabled. */
4579
4580 static int
4581 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4582 {
4583 return (info->disabled_regsets != NULL
4584 && info->disabled_regsets[regset - info->regsets]);
4585 }
4586
4587 /* Disable REGSET. */
4588
4589 static void
4590 disable_regset (struct regsets_info *info, struct regset_info *regset)
4591 {
4592 int dr_offset;
4593
4594 dr_offset = regset - info->regsets;
4595 if (info->disabled_regsets == NULL)
4596 info->disabled_regsets = xcalloc (1, info->num_regsets);
4597 info->disabled_regsets[dr_offset] = 1;
4598 }
4599
4600 static int
4601 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4602 struct regcache *regcache)
4603 {
4604 struct regset_info *regset;
4605 int saw_general_regs = 0;
4606 int pid;
4607 struct iovec iov;
4608
4609 pid = lwpid_of (current_thread);
4610 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4611 {
4612 void *buf, *data;
4613 int nt_type, res;
4614
4615 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4616 continue;
4617
4618 buf = xmalloc (regset->size);
4619
4620 nt_type = regset->nt_type;
4621 if (nt_type)
4622 {
4623 iov.iov_base = buf;
4624 iov.iov_len = regset->size;
4625 data = (void *) &iov;
4626 }
4627 else
4628 data = buf;
4629
4630 #ifndef __sparc__
4631 res = ptrace (regset->get_request, pid,
4632 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4633 #else
4634 res = ptrace (regset->get_request, pid, data, nt_type);
4635 #endif
4636 if (res < 0)
4637 {
4638 if (errno == EIO)
4639 {
4640 /* If we get EIO on a regset, do not try it again for
4641 this process mode. */
4642 disable_regset (regsets_info, regset);
4643 }
4644 else if (errno == ENODATA)
4645 {
4646 /* ENODATA may be returned if the regset is currently
4647 not "active". This can happen in normal operation,
4648 so suppress the warning in this case. */
4649 }
4650 else
4651 {
4652 char s[256];
4653 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4654 pid);
4655 perror (s);
4656 }
4657 }
4658 else
4659 {
4660 if (regset->type == GENERAL_REGS)
4661 saw_general_regs = 1;
4662 regset->store_function (regcache, buf);
4663 }
4664 free (buf);
4665 }
4666 if (saw_general_regs)
4667 return 0;
4668 else
4669 return 1;
4670 }
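
/* The nt_type/iovec dispatch above corresponds to the kernel's
   PTRACE_GETREGSET interface.  A minimal sketch (assuming x86-64 and
   a kernel that supports PTRACE_GETREGSET) of fetching the general
   registers that way; guarded out so it does not affect
   compilation.  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
fetch_gregs (int pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* The kernel updates iov.iov_len to the amount actually read.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif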
4671
4672 static int
4673 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4674 struct regcache *regcache)
4675 {
4676 struct regset_info *regset;
4677 int saw_general_regs = 0;
4678 int pid;
4679 struct iovec iov;
4680
4681 pid = lwpid_of (current_thread);
4682 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4683 {
4684 void *buf, *data;
4685 int nt_type, res;
4686
4687 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4688 || regset->fill_function == NULL)
4689 continue;
4690
4691 buf = xmalloc (regset->size);
4692
4693 /* First fill the buffer with the current register set contents,
4694 in case there are any items in the kernel's regset that are
4695 not in gdbserver's regcache. */
4696
4697 nt_type = regset->nt_type;
4698 if (nt_type)
4699 {
4700 iov.iov_base = buf;
4701 iov.iov_len = regset->size;
4702 data = (void *) &iov;
4703 }
4704 else
4705 data = buf;
4706
4707 #ifndef __sparc__
4708 res = ptrace (regset->get_request, pid,
4709 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4710 #else
4711 res = ptrace (regset->get_request, pid, data, nt_type);
4712 #endif
4713
4714 if (res == 0)
4715 {
4716 /* Then overlay our cached registers on that. */
4717 regset->fill_function (regcache, buf);
4718
4719 /* Only now do we write the register set. */
4720 #ifndef __sparc__
4721 res = ptrace (regset->set_request, pid,
4722 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4723 #else
4724 res = ptrace (regset->set_request, pid, data, nt_type);
4725 #endif
4726 }
4727
4728 if (res < 0)
4729 {
4730 if (errno == EIO)
4731 {
4732 /* If we get EIO on a regset, do not try it again for
4733 this process mode. */
4734 disable_regset (regsets_info, regset);
4735 }
4736 else if (errno == ESRCH)
4737 {
4738 /* At this point, ESRCH should mean the process is
4739 already gone, in which case we simply ignore attempts
4740 to change its registers. See also the related
4741 comment in linux_resume_one_lwp. */
4742 free (buf);
4743 return 0;
4744 }
4745 else
4746 {
4747 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4748 }
4749 }
4750 else if (regset->type == GENERAL_REGS)
4751 saw_general_regs = 1;
4752 free (buf);
4753 }
4754 if (saw_general_regs)
4755 return 0;
4756 else
4757 return 1;
4758 }
4759
4760 #else /* !HAVE_LINUX_REGSETS */
4761
4762 #define use_linux_regsets 0
4763 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4764 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4765
4766 #endif
4767
4768 /* Return 1 if register REGNO is supported by one of the regset ptrace
4769 calls or 0 if it has to be transferred individually. */
4770
4771 static int
4772 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4773 {
4774 unsigned char mask = 1 << (regno % 8);
4775 size_t index = regno / 8;
4776
4777 return (use_linux_regsets
4778 && (regs_info->regset_bitmap == NULL
4779 || (regs_info->regset_bitmap[index] & mask) != 0));
4780 }
4781
4782 #ifdef HAVE_LINUX_USRREGS
4783
4784 int
4785 register_addr (const struct usrregs_info *usrregs, int regnum)
4786 {
4787 int addr;
4788
4789 if (regnum < 0 || regnum >= usrregs->num_regs)
4790 error ("Invalid register number %d.", regnum);
4791
4792 addr = usrregs->regmap[regnum];
4793
4794 return addr;
4795 }
4796
4797 /* Fetch one register. */
4798 static void
4799 fetch_register (const struct usrregs_info *usrregs,
4800 struct regcache *regcache, int regno)
4801 {
4802 CORE_ADDR regaddr;
4803 int i, size;
4804 char *buf;
4805 int pid;
4806
4807 if (regno >= usrregs->num_regs)
4808 return;
4809 if ((*the_low_target.cannot_fetch_register) (regno))
4810 return;
4811
4812 regaddr = register_addr (usrregs, regno);
4813 if (regaddr == -1)
4814 return;
4815
4816 size = ((register_size (regcache->tdesc, regno)
4817 + sizeof (PTRACE_XFER_TYPE) - 1)
4818 & -sizeof (PTRACE_XFER_TYPE));
4819 buf = alloca (size);
4820
4821 pid = lwpid_of (current_thread);
4822 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4823 {
4824 errno = 0;
4825 *(PTRACE_XFER_TYPE *) (buf + i) =
4826 ptrace (PTRACE_PEEKUSER, pid,
4827 /* Coerce to a uintptr_t first to avoid potential gcc warning
4828 of coercing an 8 byte integer to a 4 byte pointer. */
4829 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4830 regaddr += sizeof (PTRACE_XFER_TYPE);
4831 if (errno != 0)
4832 error ("reading register %d: %s", regno, strerror (errno));
4833 }
4834
4835 if (the_low_target.supply_ptrace_register)
4836 the_low_target.supply_ptrace_register (regcache, regno, buf);
4837 else
4838 supply_register (regcache, regno, buf);
4839 }
4840
4841 /* Store one register. */
4842 static void
4843 store_register (const struct usrregs_info *usrregs,
4844 struct regcache *regcache, int regno)
4845 {
4846 CORE_ADDR regaddr;
4847 int i, size;
4848 char *buf;
4849 int pid;
4850
4851 if (regno >= usrregs->num_regs)
4852 return;
4853 if ((*the_low_target.cannot_store_register) (regno))
4854 return;
4855
4856 regaddr = register_addr (usrregs, regno);
4857 if (regaddr == -1)
4858 return;
4859
4860 size = ((register_size (regcache->tdesc, regno)
4861 + sizeof (PTRACE_XFER_TYPE) - 1)
4862 & -sizeof (PTRACE_XFER_TYPE));
4863 buf = alloca (size);
4864 memset (buf, 0, size);
4865
4866 if (the_low_target.collect_ptrace_register)
4867 the_low_target.collect_ptrace_register (regcache, regno, buf);
4868 else
4869 collect_register (regcache, regno, buf);
4870
4871 pid = lwpid_of (current_thread);
4872 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4873 {
4874 errno = 0;
4875 ptrace (PTRACE_POKEUSER, pid,
4876 /* Coerce to a uintptr_t first to avoid potential gcc warning
4877 about coercing an 8 byte integer to a 4 byte pointer. */
4878 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4879 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4880 if (errno != 0)
4881 {
4882 /* At this point, ESRCH should mean the process is
4883 already gone, in which case we simply ignore attempts
4884 to change its registers. See also the related
4885 comment in linux_resume_one_lwp. */
4886 if (errno == ESRCH)
4887 return;
4888
4889 if ((*the_low_target.cannot_store_register) (regno) == 0)
4890 error ("writing register %d: %s", regno, strerror (errno));
4891 }
4892 regaddr += sizeof (PTRACE_XFER_TYPE);
4893 }
4894 }
4895
4896 /* Fetch all registers, or just one, from the child process.
4897 If REGNO is -1, do this for all registers, skipping any that are
4898 assumed to have been retrieved by regsets_fetch_inferior_registers,
4899 unless ALL is non-zero.
4900 Otherwise, REGNO specifies which register (so we can save time). */
4901 static void
4902 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4903 struct regcache *regcache, int regno, int all)
4904 {
4905 struct usrregs_info *usr = regs_info->usrregs;
4906
4907 if (regno == -1)
4908 {
4909 for (regno = 0; regno < usr->num_regs; regno++)
4910 if (all || !linux_register_in_regsets (regs_info, regno))
4911 fetch_register (usr, regcache, regno);
4912 }
4913 else
4914 fetch_register (usr, regcache, regno);
4915 }
4916
4917 /* Store our register values back into the inferior.
4918 If REGNO is -1, do this for all registers, skipping any that are
4919 assumed to have been saved by regsets_store_inferior_registers,
4920 unless ALL is non-zero.
4921 Otherwise, REGNO specifies which register (so we can save time). */
4922 static void
4923 usr_store_inferior_registers (const struct regs_info *regs_info,
4924 struct regcache *regcache, int regno, int all)
4925 {
4926 struct usrregs_info *usr = regs_info->usrregs;
4927
4928 if (regno == -1)
4929 {
4930 for (regno = 0; regno < usr->num_regs; regno++)
4931 if (all || !linux_register_in_regsets (regs_info, regno))
4932 store_register (usr, regcache, regno);
4933 }
4934 else
4935 store_register (usr, regcache, regno);
4936 }
4937
4938 #else /* !HAVE_LINUX_USRREGS */
4939
4940 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4941 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4942
4943 #endif
4944
4945
4946 void
4947 linux_fetch_registers (struct regcache *regcache, int regno)
4948 {
4949 int use_regsets;
4950 int all = 0;
4951 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4952
4953 if (regno == -1)
4954 {
4955 if (the_low_target.fetch_register != NULL
4956 && regs_info->usrregs != NULL)
4957 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4958 (*the_low_target.fetch_register) (regcache, regno);
4959
4960 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4961 if (regs_info->usrregs != NULL)
4962 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4963 }
4964 else
4965 {
4966 if (the_low_target.fetch_register != NULL
4967 && (*the_low_target.fetch_register) (regcache, regno))
4968 return;
4969
4970 use_regsets = linux_register_in_regsets (regs_info, regno);
4971 if (use_regsets)
4972 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4973 regcache);
4974 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4975 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4976 }
4977 }
4978
4979 void
4980 linux_store_registers (struct regcache *regcache, int regno)
4981 {
4982 int use_regsets;
4983 int all = 0;
4984 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4985
4986 if (regno == -1)
4987 {
4988 all = regsets_store_inferior_registers (regs_info->regsets_info,
4989 regcache);
4990 if (regs_info->usrregs != NULL)
4991 usr_store_inferior_registers (regs_info, regcache, regno, all);
4992 }
4993 else
4994 {
4995 use_regsets = linux_register_in_regsets (regs_info, regno);
4996 if (use_regsets)
4997 all = regsets_store_inferior_registers (regs_info->regsets_info,
4998 regcache);
4999 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5000 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5001 }
5002 }
5003
5004
5005 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5006 to debugger memory starting at MYADDR. */
5007
5008 static int
5009 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5010 {
5011 int pid = lwpid_of (current_thread);
5012 register PTRACE_XFER_TYPE *buffer;
5013 register CORE_ADDR addr;
5014 register int count;
5015 char filename[64];
5016 register int i;
5017 int ret;
5018 int fd;
5019
5020 /* Try using /proc. Don't bother for one word. */
5021 if (len >= 3 * sizeof (long))
5022 {
5023 int bytes;
5024
5025 /* We could keep this file open and cache it - possibly one per
5026 thread. That requires some juggling, but is even faster. */
5027 sprintf (filename, "/proc/%d/mem", pid);
5028 fd = open (filename, O_RDONLY | O_LARGEFILE);
5029 if (fd == -1)
5030 goto no_proc;
5031
5032 /* If pread64 is available, use it. It's faster if the kernel
5033 supports it (only one syscall), and it's 64-bit safe even on
5034 32-bit platforms (for instance, SPARC debugging a SPARC64
5035 application). */
5036 #ifdef HAVE_PREAD64
5037 bytes = pread64 (fd, myaddr, len, memaddr);
5038 #else
5039 bytes = -1;
5040 if (lseek (fd, memaddr, SEEK_SET) != -1)
5041 bytes = read (fd, myaddr, len);
5042 #endif
5043
5044 close (fd);
5045 if (bytes == len)
5046 return 0;
5047
5048 /* Some data was read, we'll try to get the rest with ptrace. */
5049 if (bytes > 0)
5050 {
5051 memaddr += bytes;
5052 myaddr += bytes;
5053 len -= bytes;
5054 }
5055 }
5056
5057 no_proc:
5058 /* Round starting address down to longword boundary. */
5059 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5060 /* Round ending address up; get number of longwords that makes. */
5061 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5062 / sizeof (PTRACE_XFER_TYPE));
5063 /* Allocate buffer of that many longwords. */
5064 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5065
5066 /* Read all the longwords */
5067 errno = 0;
5068 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5069 {
5070 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5071 about coercing an 8 byte integer to a 4 byte pointer. */
5072 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5073 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5074 (PTRACE_TYPE_ARG4) 0);
5075 if (errno)
5076 break;
5077 }
5078 ret = errno;
5079
5080 /* Copy appropriate bytes out of the buffer. */
5081 if (i > 0)
5082 {
5083 i *= sizeof (PTRACE_XFER_TYPE);
5084 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5085 memcpy (myaddr,
5086 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5087 i < len ? i : len);
5088 }
5089
5090 return ret;
5091 }
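
/* Minimal, stand-alone version (illustrative only) of the fast path
   above: read LEN bytes at ADDR in PID's address space through
   /proc/PID/mem.  Assumes a 64-bit off_t (or _FILE_OFFSET_BITS=64)
   in place of the pread64 call; guarded out so it does not affect
   compilation.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
proc_mem_read (int pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  ssize_t n;
  int fd;

  snprintf (path, sizeof (path), "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif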
5092
5093 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5094 memory at MEMADDR. On failure (cannot write to the inferior)
5095 returns the value of errno. Always succeeds if LEN is zero. */
5096
5097 static int
5098 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5099 {
5100 register int i;
5101 /* Round starting address down to longword boundary. */
5102 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5103 /* Round ending address up; get number of longwords that makes. */
5104 register int count
5105 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5106 / sizeof (PTRACE_XFER_TYPE);
5107
5108 /* Allocate buffer of that many longwords. */
5109 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5110 alloca (count * sizeof (PTRACE_XFER_TYPE));
5111
5112 int pid = lwpid_of (current_thread);
5113
5114 if (len == 0)
5115 {
5116 /* Zero length write always succeeds. */
5117 return 0;
5118 }
5119
5120 if (debug_threads)
5121 {
5122 /* Dump up to four bytes.  Use memcpy so we never read past the
5123 end of the caller's buffer when LEN < 4. */
5124 unsigned int val = 0;
5125
5126 memcpy (&val, myaddr, len < 4 ? len : 4);
5130 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5131 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5132 }
5133
5134 /* Fill start and end extra bytes of buffer with existing memory data. */
5135
5136 errno = 0;
5137 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5138 about coercing an 8 byte integer to a 4 byte pointer. */
5139 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5140 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5141 (PTRACE_TYPE_ARG4) 0);
5142 if (errno)
5143 return errno;
5144
5145 if (count > 1)
5146 {
5147 errno = 0;
5148 buffer[count - 1]
5149 = ptrace (PTRACE_PEEKTEXT, pid,
5150 /* Coerce to a uintptr_t first to avoid potential gcc warning
5151 about coercing an 8 byte integer to a 4 byte pointer. */
5152 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5153 * sizeof (PTRACE_XFER_TYPE)),
5154 (PTRACE_TYPE_ARG4) 0);
5155 if (errno)
5156 return errno;
5157 }
5158
5159 /* Copy data to be written over corresponding part of buffer. */
5160
5161 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5162 myaddr, len);
5163
5164 /* Write the entire buffer. */
5165
5166 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5167 {
5168 errno = 0;
5169 ptrace (PTRACE_POKETEXT, pid,
5170 /* Coerce to a uintptr_t first to avoid potential gcc warning
5171 about coercing an 8 byte integer to a 4 byte pointer. */
5172 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5173 (PTRACE_TYPE_ARG4) buffer[i]);
5174 if (errno)
5175 return errno;
5176 }
5177
5178 return 0;
5179 }
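
/* Compressed model (illustrative only) of the word-aligned
   read-modify-write above, for a single byte: fetch the containing
   word with PTRACE_PEEKTEXT, splice the byte in, and write the word
   back with PTRACE_POKETEXT.  Guarded out so it does not affect
   compilation.  */
#if 0
#include <errno.h>
#include <string.h>
#include <sys/ptrace.h>

static int
poke_byte (int pid, unsigned long addr, unsigned char byte)
{
  unsigned long waddr = addr & ~(sizeof (long) - 1);
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKTEXT, pid, (void *) waddr, NULL);
  if (errno != 0)
    return errno;
  memcpy ((char *) &word + (addr - waddr), &byte, 1);
  if (ptrace (PTRACE_POKETEXT, pid, (void *) waddr, (void *) word) == -1)
    return errno;
  return 0;
}
#endif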
5180
5181 static void
5182 linux_look_up_symbols (void)
5183 {
5184 #ifdef USE_THREAD_DB
5185 struct process_info *proc = current_process ();
5186
5187 if (proc->priv->thread_db != NULL)
5188 return;
5189
5190 /* If the kernel supports tracing clones, then we don't need to
5191 use the magic thread event breakpoint to learn about
5192 threads. */
5193 thread_db_init (!linux_supports_traceclone ());
5194 #endif
5195 }
5196
5197 static void
5198 linux_request_interrupt (void)
5199 {
5200 extern unsigned long signal_pid;
5201
5202 /* Send a SIGINT to the process group.  This acts just as if the user
5203 had typed a ^C on the controlling terminal. */
5204 kill (-signal_pid, SIGINT);
5205 }
5206
5207 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5208 to debugger memory starting at MYADDR. */
5209
5210 static int
5211 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5212 {
5213 char filename[PATH_MAX];
5214 int fd, n;
5215 int pid = lwpid_of (current_thread);
5216
5217 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5218
5219 fd = open (filename, O_RDONLY);
5220 if (fd < 0)
5221 return -1;
5222
5223 if (offset != (CORE_ADDR) 0
5224 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5225 n = -1;
5226 else
5227 n = read (fd, myaddr, len);
5228
5229 close (fd);
5230
5231 return n;
5232 }
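
/* The raw bytes returned above are an array of auxv entries.  A
   sketch (assuming a 64-bit inferior, so Elf64_auxv_t) of walking
   them to look up one entry, e.g. AT_ENTRY; PROC_AUXV_LOOKUP is a
   hypothetical helper, guarded out so it does not affect
   compilation.  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static unsigned long
proc_auxv_lookup (int pid, unsigned long type)
{
  char path[64];
  Elf64_auxv_t aux;
  unsigned long value = 0;
  int fd;

  snprintf (path, sizeof (path), "/proc/%d/auxv", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return 0;
  while (read (fd, &aux, sizeof (aux)) == sizeof (aux)
	 && aux.a_type != AT_NULL)
    if (aux.a_type == type)
      {
	value = aux.a_un.a_val;
	break;
      }
  close (fd);
  return value;
}
#endif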
5233
5234 /* These breakpoint and watchpoint related wrapper functions simply
5235 pass on the function call if the target has registered a
5236 corresponding function. */
5237
5238 static int
5239 linux_supports_z_point_type (char z_type)
5240 {
5241 return (the_low_target.supports_z_point_type != NULL
5242 && the_low_target.supports_z_point_type (z_type));
5243 }
5244
5245 static int
5246 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5247 int size, struct raw_breakpoint *bp)
5248 {
5249 if (type == raw_bkpt_type_sw)
5250 return insert_memory_breakpoint (bp);
5251 else if (the_low_target.insert_point != NULL)
5252 return the_low_target.insert_point (type, addr, size, bp);
5253 else
5254 /* Unsupported (see target.h). */
5255 return 1;
5256 }
5257
5258 static int
5259 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5260 int size, struct raw_breakpoint *bp)
5261 {
5262 if (type == raw_bkpt_type_sw)
5263 return remove_memory_breakpoint (bp);
5264 else if (the_low_target.remove_point != NULL)
5265 return the_low_target.remove_point (type, addr, size, bp);
5266 else
5267 /* Unsupported (see target.h). */
5268 return 1;
5269 }
5270
5271 /* Implement the to_stopped_by_sw_breakpoint target_ops
5272 method. */
5273
5274 static int
5275 linux_stopped_by_sw_breakpoint (void)
5276 {
5277 struct lwp_info *lwp = get_thread_lwp (current_thread);
5278
5279 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5280 }
5281
5282 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5283 method. */
5284
5285 static int
5286 linux_supports_stopped_by_sw_breakpoint (void)
5287 {
5288 return USE_SIGTRAP_SIGINFO;
5289 }
5290
5291 /* Implement the to_stopped_by_hw_breakpoint target_ops
5292 method. */
5293
5294 static int
5295 linux_stopped_by_hw_breakpoint (void)
5296 {
5297 struct lwp_info *lwp = get_thread_lwp (current_thread);
5298
5299 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5300 }
5301
5302 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5303 method. */
5304
5305 static int
5306 linux_supports_stopped_by_hw_breakpoint (void)
5307 {
5308 return USE_SIGTRAP_SIGINFO;
5309 }
5310
5311 /* Implement the supports_conditional_breakpoints target_ops
5312 method. */
5313
5314 static int
5315 linux_supports_conditional_breakpoints (void)
5316 {
5317 /* GDBserver needs to step over the breakpoint if the condition is
5318 false. GDBserver software single step is too simple, so disable
5319 conditional breakpoints if the target doesn't have hardware single
5320 step. */
5321 return can_hardware_single_step ();
5322 }
5323
5324 static int
5325 linux_stopped_by_watchpoint (void)
5326 {
5327 struct lwp_info *lwp = get_thread_lwp (current_thread);
5328
5329 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5330 }
5331
5332 static CORE_ADDR
5333 linux_stopped_data_address (void)
5334 {
5335 struct lwp_info *lwp = get_thread_lwp (current_thread);
5336
5337 return lwp->stopped_data_address;
5338 }
5339
5340 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5341 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5342 && defined(PT_TEXT_END_ADDR)
5343
5344 /* This is only used for targets that define PT_TEXT_ADDR,
5345 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5346 the target has different ways of acquiring this information, like
5347 loadmaps. */
5348
5349 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5350 to tell gdb about. */
5351
5352 static int
5353 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5354 {
5355 unsigned long text, text_end, data;
5356 int pid = lwpid_of (current_thread);
5357
5358 errno = 0;
5359
5360 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5361 (PTRACE_TYPE_ARG4) 0);
5362 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5363 (PTRACE_TYPE_ARG4) 0);
5364 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5365 (PTRACE_TYPE_ARG4) 0);
5366
5367 if (errno == 0)
5368 {
5369 /* Both text and data offsets produced at compile-time (and so
5370 used by gdb) are relative to the beginning of the program,
5371 with the data segment immediately following the text segment.
5372 However, the actual runtime layout in memory may put the data
5373 somewhere else, so when we send gdb a data base-address, we
5374 use the real data base address and subtract the compile-time
5375 data base-address from it (which is just the length of the
5376 text segment). BSS immediately follows data in both
5377 cases. */
5378 *text_p = text;
5379 *data_p = data - (text_end - text);
5380
5381 return 1;
5382 }
5383 return 0;
5384 }
5385 #endif
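
/* Worked example of the offset computation above, with illustrative
   numbers: suppose the program was linked with text at 0 and a
   0x4000-byte text segment, so compile-time data addresses start at
   0x4000.  If at runtime PT_TEXT_ADDR reads 0x10000000,
   PT_TEXT_END_ADDR reads 0x10004000, and PT_DATA_ADDR reads
   0x20000000, we report *TEXT_P = 0x10000000 and *DATA_P =
   0x20000000 - 0x4000 = 0x1fffc000, so that when gdb relocates a
   compile-time data address such as 0x4000 by *DATA_P, it lands on
   the real 0x20000000.  */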
5386
5387 static int
5388 linux_qxfer_osdata (const char *annex,
5389 unsigned char *readbuf, unsigned const char *writebuf,
5390 CORE_ADDR offset, int len)
5391 {
5392 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5393 }
5394
5395 /* Convert a native/host siginfo object into/from the siginfo in the
5396 layout of the inferior's architecture. */
5397
5398 static void
5399 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5400 {
5401 int done = 0;
5402
5403 if (the_low_target.siginfo_fixup != NULL)
5404 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5405
5406 /* If there was no callback, or the callback didn't do anything,
5407 then just do a straight memcpy. */
5408 if (!done)
5409 {
5410 if (direction == 1)
5411 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5412 else
5413 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5414 }
5415 }
5416
5417 static int
5418 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5419 unsigned const char *writebuf, CORE_ADDR offset, int len)
5420 {
5421 int pid;
5422 siginfo_t siginfo;
5423 char inf_siginfo[sizeof (siginfo_t)];
5424
5425 if (current_thread == NULL)
5426 return -1;
5427
5428 pid = lwpid_of (current_thread);
5429
5430 if (debug_threads)
5431 debug_printf ("%s siginfo for lwp %d.\n",
5432 readbuf != NULL ? "Reading" : "Writing",
5433 pid);
5434
5435 if (offset >= sizeof (siginfo))
5436 return -1;
5437
5438 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5439 return -1;
5440
5441 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5442 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5443 inferior with a 64-bit GDBSERVER should look the same as debugging it
5444 with a 32-bit GDBSERVER, we need to convert it. */
5445 siginfo_fixup (&siginfo, inf_siginfo, 0);
5446
5447 if (offset + len > sizeof (siginfo))
5448 len = sizeof (siginfo) - offset;
5449
5450 if (readbuf != NULL)
5451 memcpy (readbuf, inf_siginfo + offset, len);
5452 else
5453 {
5454 memcpy (inf_siginfo + offset, writebuf, len);
5455
5456 /* Convert back to ptrace layout before flushing it out. */
5457 siginfo_fixup (&siginfo, inf_siginfo, 1);
5458
5459 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5460 return -1;
5461 }
5462
5463 return len;
5464 }
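
/* Round-trip sketch (illustrative only) of the siginfo transfer
   above: read the stopped thread's siginfo, tweak a field, and write
   it back.  The real code additionally converts between 32-bit and
   64-bit layouts via siginfo_fixup.  Guarded out so it does not
   affect compilation.  */
#if 0
#include <signal.h>
#include <stddef.h>
#include <sys/ptrace.h>

static int
adjust_siginfo (int pid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &si) != 0)
    return -1;
  si.si_value.sival_int = 0;	/* Example edit of one field.  */
  return ptrace (PTRACE_SETSIGINFO, pid, NULL, &si);
}
#endif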
5465
5466 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5467 it lets us notice when children change state; and it acts as the
5468 handler for the sigsuspend in my_waitpid. */
5469
5470 static void
5471 sigchld_handler (int signo)
5472 {
5473 int old_errno = errno;
5474
5475 if (debug_threads)
5476 {
5477 do
5478 {
5479 /* fprintf is not async-signal-safe, so call write
5480 directly. */
5481 if (write (2, "sigchld_handler\n",
5482 sizeof ("sigchld_handler\n") - 1) < 0)
5483 break; /* just ignore */
5484 } while (0);
5485 }
5486
5487 if (target_is_async_p ())
5488 async_file_mark (); /* trigger a linux_wait */
5489
5490 errno = old_errno;
5491 }
5492
5493 static int
5494 linux_supports_non_stop (void)
5495 {
5496 return 1;
5497 }
5498
5499 static int
5500 linux_async (int enable)
5501 {
5502 int previous = target_is_async_p ();
5503
5504 if (debug_threads)
5505 debug_printf ("linux_async (%d), previous=%d\n",
5506 enable, previous);
5507
5508 if (previous != enable)
5509 {
5510 sigset_t mask;
5511 sigemptyset (&mask);
5512 sigaddset (&mask, SIGCHLD);
5513
5514 sigprocmask (SIG_BLOCK, &mask, NULL);
5515
5516 if (enable)
5517 {
5518 if (pipe (linux_event_pipe) == -1)
5519 {
5520 linux_event_pipe[0] = -1;
5521 linux_event_pipe[1] = -1;
5522 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5523
5524 warning ("creating event pipe failed.");
5525 return previous;
5526 }
5527
5528 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5529 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5530
5531 /* Register the event loop handler. */
5532 add_file_handler (linux_event_pipe[0],
5533 handle_target_event, NULL);
5534
5535 /* Always trigger a linux_wait. */
5536 async_file_mark ();
5537 }
5538 else
5539 {
5540 delete_file_handler (linux_event_pipe[0]);
5541
5542 close (linux_event_pipe[0]);
5543 close (linux_event_pipe[1]);
5544 linux_event_pipe[0] = -1;
5545 linux_event_pipe[1] = -1;
5546 }
5547
5548 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5549 }
5550
5551 return previous;
5552 }
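
/* linux_async above wires SIGCHLD into the event loop with the
   classic self-pipe trick.  A stand-alone model (hypothetical names,
   not gdbserver's event-loop API): the handler only writes one byte
   to a non-blocking pipe, and the event loop's select/poll on
   EVENT_PIPE[0] wakes up.  Guarded out so it does not affect
   compilation.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];

static void
sigchld_mark (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe; a full pipe (EAGAIN) is fine, as one
     pending byte is enough to wake the loop.  */
  if (write (event_pipe[1], "+", 1) < 0)
    ;				/* Just ignore errors.  */
  errno = old_errno;
}

static int
install_sigchld_pipe (void)
{
  if (pipe (event_pipe) != 0)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  return signal (SIGCHLD, sigchld_mark) == SIG_ERR ? -1 : 0;
}
#endif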
5553
5554 static int
5555 linux_start_non_stop (int nonstop)
5556 {
5557 /* Register or unregister from event-loop accordingly. */
5558 linux_async (nonstop);
5559
5560 if (target_is_async_p () != (nonstop != 0))
5561 return -1;
5562
5563 return 0;
5564 }
5565
5566 static int
5567 linux_supports_multi_process (void)
5568 {
5569 return 1;
5570 }
5571
5572 /* Check if fork events are supported. */
5573
5574 static int
5575 linux_supports_fork_events (void)
5576 {
5577 return linux_supports_tracefork ();
5578 }
5579
5580 /* Check if vfork events are supported. */
5581
5582 static int
5583 linux_supports_vfork_events (void)
5584 {
5585 return linux_supports_tracefork ();
5586 }
5587
5588 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5589 options for the specified lwp. */
5590
5591 static int
5592 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5593 void *args)
5594 {
5595 struct thread_info *thread = (struct thread_info *) entry;
5596 struct lwp_info *lwp = get_thread_lwp (thread);
5597
5598 if (!lwp->stopped)
5599 {
5600 /* Stop the lwp so we can modify its ptrace options. */
5601 lwp->must_set_ptrace_flags = 1;
5602 linux_stop_lwp (lwp);
5603 }
5604 else
5605 {
5606 /* Already stopped; go ahead and set the ptrace options. */
5607 struct process_info *proc = find_process_pid (pid_of (thread));
5608 int options = linux_low_ptrace_options (proc->attached);
5609
5610 linux_enable_event_reporting (lwpid_of (thread), options);
5611 lwp->must_set_ptrace_flags = 0;
5612 }
5613
5614 return 0;
5615 }
5616
5617 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5618 ptrace flags for all inferiors. This is in case the new GDB connection
5619 doesn't support the same set of events that the previous one did. */
5620
5621 static void
5622 linux_handle_new_gdb_connection (void)
5623 {
5624 pid_t pid;
5625
5626 /* Request that all the lwps reset their ptrace options. */
5627   find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5628 }
5629
5630 static int
5631 linux_supports_disable_randomization (void)
5632 {
5633 #ifdef HAVE_PERSONALITY
5634 return 1;
5635 #else
5636 return 0;
5637 #endif
5638 }
5639
5640 static int
5641 linux_supports_agent (void)
5642 {
5643 return 1;
5644 }
5645
5646 static int
5647 linux_supports_range_stepping (void)
5648 {
5649 if (*the_low_target.supports_range_stepping == NULL)
5650 return 0;
5651
5652 return (*the_low_target.supports_range_stepping) ();
5653 }
5654
5655 /* Enumerate spufs IDs for process PID. */
5656 static int
5657 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5658 {
5659 int pos = 0;
5660 int written = 0;
5661 char path[128];
5662 DIR *dir;
5663 struct dirent *entry;
5664
5665 sprintf (path, "/proc/%ld/fd", pid);
5666 dir = opendir (path);
5667 if (!dir)
5668 return -1;
5669
5670 rewinddir (dir);
5671 while ((entry = readdir (dir)) != NULL)
5672 {
5673 struct stat st;
5674 struct statfs stfs;
5675 int fd;
5676
5677 fd = atoi (entry->d_name);
5678 if (!fd)
5679 continue;
5680
5681 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5682 if (stat (path, &st) != 0)
5683 continue;
5684 if (!S_ISDIR (st.st_mode))
5685 continue;
5686
5687 if (statfs (path, &stfs) != 0)
5688 continue;
5689 if (stfs.f_type != SPUFS_MAGIC)
5690 continue;
5691
5692 if (pos >= offset && pos + 4 <= offset + len)
5693 {
5694 *(unsigned int *)(buf + pos - offset) = fd;
5695 written += 4;
5696 }
5697 pos += 4;
5698 }
5699
5700 closedir (dir);
5701 return written;
5702 }
5703
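/* Hedged usage sketch for the enumerator above: fetch up to 16 spufs
   context IDs starting at offset 0 and print them; PID is hypothetical,
   and printf assumes <stdio.h>.  Each ID occupies 4 bytes of BUF, in
   host byte order.  */
#if 0
{
  unsigned char buf[16 * 4];
  int i, n = spu_enumerate_spu_ids (pid, buf, 0, sizeof buf);

  /* N is the number of bytes written, so step 4 bytes per ID.  */
  for (i = 0; i + 4 <= n; i += 4)
    printf ("spufs context fd: %u\n", *(unsigned int *) (buf + i));
}
#endif
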
5704 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5705 object type, using the /proc file system. */
5706 static int
5707 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5708 unsigned const char *writebuf,
5709 CORE_ADDR offset, int len)
5710 {
5711 long pid = lwpid_of (current_thread);
5712 char buf[128];
5713 int fd = 0;
5714 int ret = 0;
5715
5716 if (!writebuf && !readbuf)
5717 return -1;
5718
5719 if (!*annex)
5720 {
5721 if (!readbuf)
5722 return -1;
5723 else
5724 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5725 }
5726
5727 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5728   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5729 if (fd <= 0)
5730 return -1;
5731
5732 if (offset != 0
5733 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5734 {
5735 close (fd);
5736 return 0;
5737 }
5738
5739 if (writebuf)
5740 ret = write (fd, writebuf, (size_t) len);
5741 else
5742 ret = read (fd, readbuf, (size_t) len);
5743
5744 close (fd);
5745 return ret;
5746 }
5747
5748 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5749 struct target_loadseg
5750 {
5751 /* Core address to which the segment is mapped. */
5752 Elf32_Addr addr;
5753 /* VMA recorded in the program header. */
5754 Elf32_Addr p_vaddr;
5755 /* Size of this segment in memory. */
5756 Elf32_Word p_memsz;
5757 };
5758
5759 # if defined PT_GETDSBT
5760 struct target_loadmap
5761 {
5762 /* Protocol version number, must be zero. */
5763 Elf32_Word version;
5764 /* Pointer to the DSBT table, its size, and the DSBT index. */
5765 unsigned *dsbt_table;
5766 unsigned dsbt_size, dsbt_index;
5767 /* Number of segments in this map. */
5768 Elf32_Word nsegs;
5769 /* The actual memory map. */
5770 struct target_loadseg segs[/*nsegs*/];
5771 };
5772 # define LINUX_LOADMAP PT_GETDSBT
5773 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5774 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5775 # else
5776 struct target_loadmap
5777 {
5778 /* Protocol version number, must be zero. */
5779 Elf32_Half version;
5780 /* Number of segments in this map. */
5781 Elf32_Half nsegs;
5782 /* The actual memory map. */
5783 struct target_loadseg segs[/*nsegs*/];
5784 };
5785 # define LINUX_LOADMAP PTRACE_GETFDPIC
5786 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5787 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5788 # endif
5789
5790 static int
5791 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5792 unsigned char *myaddr, unsigned int len)
5793 {
5794 int pid = lwpid_of (current_thread);
5795 int addr = -1;
5796 struct target_loadmap *data = NULL;
5797 unsigned int actual_length, copy_length;
5798
5799 if (strcmp (annex, "exec") == 0)
5800 addr = (int) LINUX_LOADMAP_EXEC;
5801 else if (strcmp (annex, "interp") == 0)
5802 addr = (int) LINUX_LOADMAP_INTERP;
5803 else
5804 return -1;
5805
5806 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5807 return -1;
5808
5809 if (data == NULL)
5810 return -1;
5811
5812 actual_length = sizeof (struct target_loadmap)
5813 + sizeof (struct target_loadseg) * data->nsegs;
5814
5815 if (offset < 0 || offset > actual_length)
5816 return -1;
5817
5818 copy_length = actual_length - offset < len ? actual_length - offset : len;
5819 memcpy (myaddr, (char *) data + offset, copy_length);
5820 return copy_length;
5821 }
5822 #else
5823 # define linux_read_loadmap NULL
5824 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5825
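/* Illustrative sketch of fetching an FDPIC load map directly, assuming
   a kernel that implements PTRACE_GETFDPIC (the same request the
   function above issues); PID is hypothetical, and printf assumes
   <stdio.h>.  */
#if 0
{
  struct target_loadmap *map = NULL;
  int i;

  /* The kernel stores a pointer to the executable's load map.  */
  if (ptrace (PTRACE_GETFDPIC, pid, PTRACE_GETFDPIC_EXEC, &map) == 0
      && map != NULL)
    for (i = 0; i < map->nsegs; i++)
      printf ("seg %d: p_vaddr 0x%lx mapped at 0x%lx\n", i,
	      (unsigned long) map->segs[i].p_vaddr,
	      (unsigned long) map->segs[i].addr);
}
#endif
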
5826 static void
5827 linux_process_qsupported (const char *query)
5828 {
5829 if (the_low_target.process_qsupported != NULL)
5830 the_low_target.process_qsupported (query);
5831 }
5832
5833 static int
5834 linux_supports_tracepoints (void)
5835 {
5836 if (*the_low_target.supports_tracepoints == NULL)
5837 return 0;
5838
5839 return (*the_low_target.supports_tracepoints) ();
5840 }
5841
5842 static CORE_ADDR
5843 linux_read_pc (struct regcache *regcache)
5844 {
5845 if (the_low_target.get_pc == NULL)
5846 return 0;
5847
5848 return (*the_low_target.get_pc) (regcache);
5849 }
5850
5851 static void
5852 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5853 {
5854 gdb_assert (the_low_target.set_pc != NULL);
5855
5856 (*the_low_target.set_pc) (regcache, pc);
5857 }
5858
5859 static int
5860 linux_thread_stopped (struct thread_info *thread)
5861 {
5862 return get_thread_lwp (thread)->stopped;
5863 }
5864
5865 /* This exposes stop-all-threads functionality to other modules. */
5866
5867 static void
5868 linux_pause_all (int freeze)
5869 {
5870 stop_all_lwps (freeze, NULL);
5871 }
5872
5873 /* This exposes unstop-all-threads functionality to other gdbserver
5874 modules. */
5875
5876 static void
5877 linux_unpause_all (int unfreeze)
5878 {
5879 unstop_all_lwps (unfreeze, NULL);
5880 }
5881
5882 static int
5883 linux_prepare_to_access_memory (void)
5884 {
5885   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5886      running LWP.  */
5887 if (non_stop)
5888 linux_pause_all (1);
5889 return 0;
5890 }
5891
5892 static void
5893 linux_done_accessing_memory (void)
5894 {
5895   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5896      running LWP.  */
5897 if (non_stop)
5898 linux_unpause_all (1);
5899 }
5900
5901 static int
5902 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5903 CORE_ADDR collector,
5904 CORE_ADDR lockaddr,
5905 ULONGEST orig_size,
5906 CORE_ADDR *jump_entry,
5907 CORE_ADDR *trampoline,
5908 ULONGEST *trampoline_size,
5909 unsigned char *jjump_pad_insn,
5910 ULONGEST *jjump_pad_insn_size,
5911 CORE_ADDR *adjusted_insn_addr,
5912 CORE_ADDR *adjusted_insn_addr_end,
5913 char *err)
5914 {
5915 return (*the_low_target.install_fast_tracepoint_jump_pad)
5916 (tpoint, tpaddr, collector, lockaddr, orig_size,
5917 jump_entry, trampoline, trampoline_size,
5918 jjump_pad_insn, jjump_pad_insn_size,
5919 adjusted_insn_addr, adjusted_insn_addr_end,
5920 err);
5921 }
5922
5923 static struct emit_ops *
5924 linux_emit_ops (void)
5925 {
5926 if (the_low_target.emit_ops != NULL)
5927 return (*the_low_target.emit_ops) ();
5928 else
5929 return NULL;
5930 }
5931
5932 static int
5933 linux_get_min_fast_tracepoint_insn_len (void)
5934 {
5935 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5936 }
5937
5938 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5939
5940 static int
5941 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5942 CORE_ADDR *phdr_memaddr, int *num_phdr)
5943 {
5944 char filename[PATH_MAX];
5945 int fd;
5946 const int auxv_size = is_elf64
5947 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5948 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5949
5950 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5951
5952 fd = open (filename, O_RDONLY);
5953 if (fd < 0)
5954 return 1;
5955
5956 *phdr_memaddr = 0;
5957 *num_phdr = 0;
5958 while (read (fd, buf, auxv_size) == auxv_size
5959 && (*phdr_memaddr == 0 || *num_phdr == 0))
5960 {
5961 if (is_elf64)
5962 {
5963 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5964
5965 switch (aux->a_type)
5966 {
5967 case AT_PHDR:
5968 *phdr_memaddr = aux->a_un.a_val;
5969 break;
5970 case AT_PHNUM:
5971 *num_phdr = aux->a_un.a_val;
5972 break;
5973 }
5974 }
5975 else
5976 {
5977 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5978
5979 switch (aux->a_type)
5980 {
5981 case AT_PHDR:
5982 *phdr_memaddr = aux->a_un.a_val;
5983 break;
5984 case AT_PHNUM:
5985 *num_phdr = aux->a_un.a_val;
5986 break;
5987 }
5988 }
5989 }
5990
5991 close (fd);
5992
5993 if (*phdr_memaddr == 0 || *num_phdr == 0)
5994 {
5995 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5996 "phdr_memaddr = %ld, phdr_num = %d",
5997 (long) *phdr_memaddr, *num_phdr);
5998 return 2;
5999 }
6000
6001 return 0;
6002 }
6003
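/* Self-contained sketch of the same auxv scan, reading the current
   process's own vector instead of a tracee's; assumes a 64-bit host
   for brevity.  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <unistd.h>

static unsigned long
auxv_lookup_self (unsigned long type)	/* E.g. AT_PHDR or AT_PHNUM.  */
{
  Elf64_auxv_t aux;
  unsigned long val = 0;
  int fd = open ("/proc/self/auxv", O_RDONLY);

  if (fd < 0)
    return 0;
  /* The vector is a flat array of (a_type, a_un) pairs ended by
     AT_NULL.  */
  while (read (fd, &aux, sizeof aux) == sizeof aux
	 && aux.a_type != AT_NULL)
    if (aux.a_type == type)
      {
	val = aux.a_un.a_val;
	break;
      }
  close (fd);
  return val;
}
#endif
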
6004 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6005
6006 static CORE_ADDR
6007 get_dynamic (const int pid, const int is_elf64)
6008 {
6009 CORE_ADDR phdr_memaddr, relocation;
6010 int num_phdr, i;
6011 unsigned char *phdr_buf;
6012 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6013
6014 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6015 return 0;
6016
6017 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6018 phdr_buf = alloca (num_phdr * phdr_size);
6019
6020 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6021 return 0;
6022
6023 /* Compute relocation: it is expected to be 0 for "regular" executables,
6024 non-zero for PIE ones. */
6025 relocation = -1;
6026 for (i = 0; relocation == -1 && i < num_phdr; i++)
6027 if (is_elf64)
6028 {
6029 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6030
6031 if (p->p_type == PT_PHDR)
6032 relocation = phdr_memaddr - p->p_vaddr;
6033 }
6034 else
6035 {
6036 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6037
6038 if (p->p_type == PT_PHDR)
6039 relocation = phdr_memaddr - p->p_vaddr;
6040 }
6041
6042 if (relocation == -1)
6043 {
6044       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
6045 	 all real-world executables, including PIE executables, always have
6046 	 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6047 	 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6048 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
6049 
6050 	 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6051 
6052 	 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6053
6054 return 0;
6055 }
6056
6057 for (i = 0; i < num_phdr; i++)
6058 {
6059 if (is_elf64)
6060 {
6061 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6062
6063 if (p->p_type == PT_DYNAMIC)
6064 return p->p_vaddr + relocation;
6065 }
6066 else
6067 {
6068 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6069
6070 if (p->p_type == PT_DYNAMIC)
6071 return p->p_vaddr + relocation;
6072 }
6073 }
6074
6075 return 0;
6076 }
6077
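/* Worked example of the relocation computed above: if AT_PHDR reports
   the program headers at 0x555555554040 and PT_PHDR's p_vaddr is 0x40,
   the PIE load bias is 0x555555554000; a PT_DYNAMIC with p_vaddr 0x2e20
   then lives at 0x555555556e20.  (Addresses illustrative.)  */
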
6078 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6079 can be 0 if the inferior does not yet have the library list initialized.
6080 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6081 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6082
6083 static CORE_ADDR
6084 get_r_debug (const int pid, const int is_elf64)
6085 {
6086 CORE_ADDR dynamic_memaddr;
6087 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6088 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6089 CORE_ADDR map = -1;
6090
6091 dynamic_memaddr = get_dynamic (pid, is_elf64);
6092 if (dynamic_memaddr == 0)
6093 return map;
6094
6095 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6096 {
6097 if (is_elf64)
6098 {
6099 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6100 #ifdef DT_MIPS_RLD_MAP
6101 union
6102 {
6103 Elf64_Xword map;
6104 unsigned char buf[sizeof (Elf64_Xword)];
6105 }
6106 rld_map;
6107
6108 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6109 {
6110 if (linux_read_memory (dyn->d_un.d_val,
6111 rld_map.buf, sizeof (rld_map.buf)) == 0)
6112 return rld_map.map;
6113 else
6114 break;
6115 }
6116 #endif /* DT_MIPS_RLD_MAP */
6117
6118 if (dyn->d_tag == DT_DEBUG && map == -1)
6119 map = dyn->d_un.d_val;
6120
6121 if (dyn->d_tag == DT_NULL)
6122 break;
6123 }
6124 else
6125 {
6126 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6127 #ifdef DT_MIPS_RLD_MAP
6128 union
6129 {
6130 Elf32_Word map;
6131 unsigned char buf[sizeof (Elf32_Word)];
6132 }
6133 rld_map;
6134
6135 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6136 {
6137 if (linux_read_memory (dyn->d_un.d_val,
6138 rld_map.buf, sizeof (rld_map.buf)) == 0)
6139 return rld_map.map;
6140 else
6141 break;
6142 }
6143 #endif /* DT_MIPS_RLD_MAP */
6144
6145 if (dyn->d_tag == DT_DEBUG && map == -1)
6146 map = dyn->d_un.d_val;
6147
6148 if (dyn->d_tag == DT_NULL)
6149 break;
6150 }
6151
6152 dynamic_memaddr += dyn_size;
6153 }
6154
6155 return map;
6156 }
6157
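/* As a hedged aside, a process can run the same DT_DEBUG scan on its
   own _DYNAMIC array without ptrace; glibc's <link.h> provides the
   struct r_debug declaration.  */
#if 0
#include <elf.h>
#include <link.h>

extern ElfW(Dyn) _DYNAMIC[];

static struct r_debug *
find_r_debug_self (void)
{
  ElfW(Dyn) *dyn;

  /* Walk the dynamic section until the DT_NULL terminator.  */
  for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      return (struct r_debug *) dyn->d_un.d_ptr;
  return NULL;
}
#endif
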
6158 /* Read one pointer from MEMADDR in the inferior. */
6159
6160 static int
6161 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6162 {
6163 int ret;
6164
6165 /* Go through a union so this works on either big or little endian
6166 hosts, when the inferior's pointer size is smaller than the size
6167 of CORE_ADDR. It is assumed the inferior's endianness is the
6168      same as the superior's.  */
6169 union
6170 {
6171 CORE_ADDR core_addr;
6172 unsigned int ui;
6173 unsigned char uc;
6174 } addr;
6175
6176 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6177 if (ret == 0)
6178 {
6179 if (ptr_size == sizeof (CORE_ADDR))
6180 *ptr = addr.core_addr;
6181 else if (ptr_size == sizeof (unsigned int))
6182 *ptr = addr.ui;
6183 else
6184 gdb_assert_not_reached ("unhandled pointer size");
6185 }
6186 return ret;
6187 }
6188
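/* For example, on a big-endian 64-bit host reading a 4-byte inferior
   pointer: the bytes land at the start of the union, which is exactly
   where ADDR.UI's storage begins, so ADDR.UI yields the right value.
   Reading into a bare CORE_ADDR would instead fill its high-order
   bytes.  */
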
6189 struct link_map_offsets
6190 {
6191 /* Offset and size of r_debug.r_version. */
6192 int r_version_offset;
6193
6194 /* Offset and size of r_debug.r_map. */
6195 int r_map_offset;
6196
6197 /* Offset to l_addr field in struct link_map. */
6198 int l_addr_offset;
6199
6200 /* Offset to l_name field in struct link_map. */
6201 int l_name_offset;
6202
6203 /* Offset to l_ld field in struct link_map. */
6204 int l_ld_offset;
6205
6206 /* Offset to l_next field in struct link_map. */
6207 int l_next_offset;
6208
6209 /* Offset to l_prev field in struct link_map. */
6210 int l_prev_offset;
6211 };
6212
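/* Hedged cross-check of the hard-coded offset tables below: on a native
   glibc build of matching pointer size they should agree with the real
   struct layouts, e.g. (C11 _Static_assert, illustrative only).  */
#if 0
#include <stddef.h>
#include <link.h>

_Static_assert (offsetof (struct r_debug, r_map)
		== (sizeof (void *) == 8 ? 8 : 4), "r_map offset");
_Static_assert (offsetof (struct link_map, l_next)
		== (sizeof (void *) == 8 ? 24 : 12), "l_next offset");
#endif
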
6213 /* Construct qXfer:libraries-svr4:read reply. */
6214
6215 static int
6216 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6217 unsigned const char *writebuf,
6218 CORE_ADDR offset, int len)
6219 {
6220 char *document;
6221 unsigned document_len;
6222 struct process_info_private *const priv = current_process ()->priv;
6223 char filename[PATH_MAX];
6224 int pid, is_elf64;
6225
6226 static const struct link_map_offsets lmo_32bit_offsets =
6227 {
6228 0, /* r_version offset. */
6229 4, /* r_debug.r_map offset. */
6230 0, /* l_addr offset in link_map. */
6231 4, /* l_name offset in link_map. */
6232 8, /* l_ld offset in link_map. */
6233 12, /* l_next offset in link_map. */
6234 16 /* l_prev offset in link_map. */
6235 };
6236
6237 static const struct link_map_offsets lmo_64bit_offsets =
6238 {
6239 0, /* r_version offset. */
6240 8, /* r_debug.r_map offset. */
6241 0, /* l_addr offset in link_map. */
6242 8, /* l_name offset in link_map. */
6243 16, /* l_ld offset in link_map. */
6244 24, /* l_next offset in link_map. */
6245 32 /* l_prev offset in link_map. */
6246 };
6247 const struct link_map_offsets *lmo;
6248 unsigned int machine;
6249 int ptr_size;
6250 CORE_ADDR lm_addr = 0, lm_prev = 0;
6251 int allocated = 1024;
6252 char *p;
6253 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6254 int header_done = 0;
6255
6256 if (writebuf != NULL)
6257 return -2;
6258 if (readbuf == NULL)
6259 return -1;
6260
6261 pid = lwpid_of (current_thread);
6262 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6263 is_elf64 = elf_64_file_p (filename, &machine);
6264 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6265 ptr_size = is_elf64 ? 8 : 4;
6266
6267 while (annex[0] != '\0')
6268 {
6269 const char *sep;
6270 CORE_ADDR *addrp;
6271 int len;
6272
6273 sep = strchr (annex, '=');
6274 if (sep == NULL)
6275 break;
6276
6277 len = sep - annex;
6278 if (len == 5 && startswith (annex, "start"))
6279 addrp = &lm_addr;
6280 else if (len == 4 && startswith (annex, "prev"))
6281 addrp = &lm_prev;
6282 else
6283 {
6284 annex = strchr (sep, ';');
6285 if (annex == NULL)
6286 break;
6287 annex++;
6288 continue;
6289 }
6290
6291 annex = decode_address_to_semicolon (addrp, sep + 1);
6292 }
6293
6294 if (lm_addr == 0)
6295 {
6296 int r_version = 0;
6297
6298 if (priv->r_debug == 0)
6299 priv->r_debug = get_r_debug (pid, is_elf64);
6300
6301       /* We failed to find DT_DEBUG.  This situation will not change
6302 	 for this inferior - do not retry it.  Report it to GDB as
6303 	 E01; see GDB's solib-svr4.c for the reasons.  */
6304 if (priv->r_debug == (CORE_ADDR) -1)
6305 return -1;
6306
6307 if (priv->r_debug != 0)
6308 {
6309 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6310 (unsigned char *) &r_version,
6311 sizeof (r_version)) != 0
6312 || r_version != 1)
6313 {
6314 warning ("unexpected r_debug version %d", r_version);
6315 }
6316 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6317 &lm_addr, ptr_size) != 0)
6318 {
6319 warning ("unable to read r_map from 0x%lx",
6320 (long) priv->r_debug + lmo->r_map_offset);
6321 }
6322 }
6323 }
6324
6325 document = xmalloc (allocated);
6326 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6327 p = document + strlen (document);
6328
6329 while (lm_addr
6330 && read_one_ptr (lm_addr + lmo->l_name_offset,
6331 &l_name, ptr_size) == 0
6332 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6333 &l_addr, ptr_size) == 0
6334 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6335 &l_ld, ptr_size) == 0
6336 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6337 &l_prev, ptr_size) == 0
6338 && read_one_ptr (lm_addr + lmo->l_next_offset,
6339 &l_next, ptr_size) == 0)
6340 {
6341 unsigned char libname[PATH_MAX];
6342
6343 if (lm_prev != l_prev)
6344 {
6345 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6346 (long) lm_prev, (long) l_prev);
6347 break;
6348 }
6349
6350       /* Ignore the first entry even if it has a valid name, as the first
6351 	 entry corresponds to the main executable.  The first entry should
6352 	 not be skipped if the dynamic loader was loaded late by a static
6353 	 executable (see the solib-svr4.c parameter ignore_first).  But in
6354 	 that case the main executable has no PT_DYNAMIC, and this function
6355 	 has already returned above due to a failed get_r_debug.  */
6356 if (lm_prev == 0)
6357 {
6358 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6359 p = p + strlen (p);
6360 }
6361 else
6362 {
6363 /* Not checking for error because reading may stop before
6364 we've got PATH_MAX worth of characters. */
6365 libname[0] = '\0';
6366 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6367 libname[sizeof (libname) - 1] = '\0';
6368 if (libname[0] != '\0')
6369 {
6370 /* 6x the size for xml_escape_text below. */
6371 size_t len = 6 * strlen ((char *) libname);
6372 char *name;
6373
6374 if (!header_done)
6375 {
6376 /* Terminate `<library-list-svr4'. */
6377 *p++ = '>';
6378 header_done = 1;
6379 }
6380
6381 while (allocated < p - document + len + 200)
6382 {
6383 /* Expand to guarantee sufficient storage. */
6384 uintptr_t document_len = p - document;
6385
6386 document = xrealloc (document, 2 * allocated);
6387 allocated *= 2;
6388 p = document + document_len;
6389 }
6390
6391 name = xml_escape_text ((char *) libname);
6392 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6393 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6394 name, (unsigned long) lm_addr,
6395 (unsigned long) l_addr, (unsigned long) l_ld);
6396 free (name);
6397 }
6398 }
6399
6400 lm_prev = lm_addr;
6401 lm_addr = l_next;
6402 }
6403
6404 if (!header_done)
6405 {
6406 /* Empty list; terminate `<library-list-svr4'. */
6407 strcpy (p, "/>");
6408 }
6409 else
6410 strcpy (p, "</library-list-svr4>");
6411
6412 document_len = strlen (document);
6413 if (offset < document_len)
6414 document_len -= offset;
6415 else
6416 document_len = 0;
6417 if (len > document_len)
6418 len = document_len;
6419
6420 memcpy (readbuf, document + offset, len);
6421 xfree (document);
6422
6423 return len;
6424 }
6425
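/* The document built above has this overall shape (the addresses and
   library name are illustrative):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
       l_addr="0x7ffff7a3c000" l_ld="0x7ffff7dd5e80"/>
   </library-list-svr4>
*/
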
6426 #ifdef HAVE_LINUX_BTRACE
6427
6428 /* See to_enable_btrace target method. */
6429
6430 static struct btrace_target_info *
6431 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6432 {
6433 struct btrace_target_info *tinfo;
6434
6435 tinfo = linux_enable_btrace (ptid, conf);
6436
6437 if (tinfo != NULL && tinfo->ptr_bits == 0)
6438 {
6439 struct thread_info *thread = find_thread_ptid (ptid);
6440 struct regcache *regcache = get_thread_regcache (thread, 0);
6441
6442 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6443 }
6444
6445 return tinfo;
6446 }
6447
6448 /* See to_disable_btrace target method. */
6449
6450 static int
6451 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6452 {
6453 enum btrace_error err;
6454
6455 err = linux_disable_btrace (tinfo);
6456 return (err == BTRACE_ERR_NONE ? 0 : -1);
6457 }
6458
6459 /* See to_read_btrace target method. */
6460
6461 static int
6462 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6463 int type)
6464 {
6465 struct btrace_data btrace;
6466 struct btrace_block *block;
6467 enum btrace_error err;
6468 int i;
6469
6470 btrace_data_init (&btrace);
6471
6472 err = linux_read_btrace (&btrace, tinfo, type);
6473 if (err != BTRACE_ERR_NONE)
6474 {
6475 if (err == BTRACE_ERR_OVERFLOW)
6476 buffer_grow_str0 (buffer, "E.Overflow.");
6477 else
6478 buffer_grow_str0 (buffer, "E.Generic Error.");
6479
6480 btrace_data_fini (&btrace);
6481 return -1;
6482 }
6483
6484 switch (btrace.format)
6485 {
6486 case BTRACE_FORMAT_NONE:
6487 buffer_grow_str0 (buffer, "E.No Trace.");
6488 break;
6489
6490 case BTRACE_FORMAT_BTS:
6491 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6492 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6493
6494 for (i = 0;
6495 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6496 i++)
6497 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6498 paddress (block->begin), paddress (block->end));
6499
6500 buffer_grow_str0 (buffer, "</btrace>\n");
6501 break;
6502
6503 default:
6504 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6505
6506 btrace_data_fini (&btrace);
6507 return -1;
6508 }
6509
6510 btrace_data_fini (&btrace);
6511 return 0;
6512 }
6513
6514 /* See to_btrace_conf target method. */
6515
6516 static int
6517 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6518 struct buffer *buffer)
6519 {
6520 const struct btrace_config *conf;
6521
6522 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6523 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6524
6525 conf = linux_btrace_conf (tinfo);
6526 if (conf != NULL)
6527 {
6528 switch (conf->format)
6529 {
6530 case BTRACE_FORMAT_NONE:
6531 break;
6532
6533 case BTRACE_FORMAT_BTS:
6534 buffer_xml_printf (buffer, "<bts");
6535 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6536 buffer_xml_printf (buffer, " />\n");
6537 break;
6538 }
6539 }
6540
6541 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6542 return 0;
6543 }
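
/* For a BTS configuration the reply assembled above looks roughly like
   this (size illustrative):

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <bts size="0x10000" />
   </btrace-conf>
*/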
6544 #endif /* HAVE_LINUX_BTRACE */
6545
6546 /* See nat/linux-nat.h. */
6547
6548 ptid_t
6549 current_lwp_ptid (void)
6550 {
6551 return ptid_of (current_thread);
6552 }
6553
6554 static struct target_ops linux_target_ops = {
6555 linux_create_inferior,
6556 linux_attach,
6557 linux_kill,
6558 linux_detach,
6559 linux_mourn,
6560 linux_join,
6561 linux_thread_alive,
6562 linux_resume,
6563 linux_wait,
6564 linux_fetch_registers,
6565 linux_store_registers,
6566 linux_prepare_to_access_memory,
6567 linux_done_accessing_memory,
6568 linux_read_memory,
6569 linux_write_memory,
6570 linux_look_up_symbols,
6571 linux_request_interrupt,
6572 linux_read_auxv,
6573 linux_supports_z_point_type,
6574 linux_insert_point,
6575 linux_remove_point,
6576 linux_stopped_by_sw_breakpoint,
6577 linux_supports_stopped_by_sw_breakpoint,
6578 linux_stopped_by_hw_breakpoint,
6579 linux_supports_stopped_by_hw_breakpoint,
6580 linux_supports_conditional_breakpoints,
6581 linux_stopped_by_watchpoint,
6582 linux_stopped_data_address,
6583 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6584 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6585 && defined(PT_TEXT_END_ADDR)
6586 linux_read_offsets,
6587 #else
6588 NULL,
6589 #endif
6590 #ifdef USE_THREAD_DB
6591 thread_db_get_tls_address,
6592 #else
6593 NULL,
6594 #endif
6595 linux_qxfer_spu,
6596 hostio_last_error_from_errno,
6597 linux_qxfer_osdata,
6598 linux_xfer_siginfo,
6599 linux_supports_non_stop,
6600 linux_async,
6601 linux_start_non_stop,
6602 linux_supports_multi_process,
6603 linux_supports_fork_events,
6604 linux_supports_vfork_events,
6605 linux_handle_new_gdb_connection,
6606 #ifdef USE_THREAD_DB
6607 thread_db_handle_monitor_command,
6608 #else
6609 NULL,
6610 #endif
6611 linux_common_core_of_thread,
6612 linux_read_loadmap,
6613 linux_process_qsupported,
6614 linux_supports_tracepoints,
6615 linux_read_pc,
6616 linux_write_pc,
6617 linux_thread_stopped,
6618 NULL,
6619 linux_pause_all,
6620 linux_unpause_all,
6621 linux_stabilize_threads,
6622 linux_install_fast_tracepoint_jump_pad,
6623 linux_emit_ops,
6624 linux_supports_disable_randomization,
6625 linux_get_min_fast_tracepoint_insn_len,
6626 linux_qxfer_libraries_svr4,
6627 linux_supports_agent,
6628 #ifdef HAVE_LINUX_BTRACE
6629 linux_supports_btrace,
6630 linux_low_enable_btrace,
6631 linux_low_disable_btrace,
6632 linux_low_read_btrace,
6633 linux_low_btrace_conf,
6634 #else
6635 NULL,
6636 NULL,
6637 NULL,
6638 NULL,
6639 NULL,
6640 #endif
6641 linux_supports_range_stepping,
6642 linux_proc_pid_to_exec_file,
6643 };
6644
6645 static void
6646 linux_init_signals (void)
6647 {
6648 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6649 to find what the cancel signal actually is. */
6650 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6651 signal (__SIGRTMIN+1, SIG_IGN);
6652 #endif
6653 }
6654
6655 #ifdef HAVE_LINUX_REGSETS
6656 void
6657 initialize_regsets_info (struct regsets_info *info)
6658 {
6659 for (info->num_regsets = 0;
6660 info->regsets[info->num_regsets].size >= 0;
6661 info->num_regsets++)
6662 ;
6663 }
6664 #endif
6665
6666 void
6667 initialize_low (void)
6668 {
6669 struct sigaction sigchld_action;
6670 memset (&sigchld_action, 0, sizeof (sigchld_action));
6671 set_target_ops (&linux_target_ops);
6672 set_breakpoint_data (the_low_target.breakpoint,
6673 the_low_target.breakpoint_len);
6674 linux_init_signals ();
6675 linux_ptrace_init_warnings ();
6676
6677 sigchld_action.sa_handler = sigchld_handler;
6678 sigemptyset (&sigchld_action.sa_mask);
6679 sigchld_action.sa_flags = SA_RESTART;
6680 sigaction (SIGCHLD, &sigchld_action, NULL);
6681
6682 initialize_low_arch ();
6683
6684 linux_check_ptrace_features ();
6685 }